UNPKG

@emergentmethods/asknews-typescript-sdk

Version:
75 lines (74 loc) 3.93 kB
/* tslint:disable */
/* eslint-disable */
/**
 * AskNews API
 * AskNews API [![status](https://status.asknews.app/api/badge/2/status?style=for-the-badge)](https://status.asknews.app/status/prod)
 *
 * The version of the OpenAPI document: 0.24.22
 * Contact: contact@emergentmethods.ai
 *
 * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * https://openapi-generator.tech
 * Do not edit the class manually.
 */
import * as runtime from '../runtime';
import { WebSearchResponseFromJSON, } from '../models/index';
/**
 * WebsearchApi — client for the AskNews live websearch endpoint
 * (`GET /v1/chat/websearch`).
 */
export class WebsearchApi extends runtime.BaseAPI {
    /**
     * Run a live websearch on a set of queries, get back a fully structured and LLM-distilled response (in addition to the raw text if you need that as well). Your response includes as_string and as_dicts, where as_string is a prompt-optimized distillation of the information, done by an LLM. as_dicts contains all the details necessary to feed into other parts of your application.
     * Run a live websearch.
     *
     * @param requestParameters - `{ queries, lookback?, domains?, strict?, offset? }`;
     *   only `queries` is required, the rest are sent only when non-null.
     * @param initOverrides - optional request overrides forwarded to `runtime.BaseAPI.request`.
     * @returns a `runtime.JSONApiResponse`; call `.value()` to parse the body into a `WebSearchResponse`.
     * @throws {runtime.RequiredError} when `queries` is null or undefined.
     */
    async liveWebSearchRaw(requestParameters, initOverrides) {
        if (requestParameters['queries'] == null) {
            throw new runtime.RequiredError('queries', 'Required parameter "queries" was null or undefined when calling liveWebSearch().');
        }
        const queryParameters = {};
        // The guard above ensures `queries` is non-null, so it is always sent
        // (the generated re-check was a dead branch and has been removed).
        queryParameters['queries'] = requestParameters['queries'];
        // Optional filters are copied into the query string only when provided;
        // insertion order matches the original generated code.
        for (const param of ['lookback', 'domains', 'strict', 'offset']) {
            if (requestParameters[param] != null) {
                queryParameters[param] = requestParameters[param];
            }
        }
        const headerParameters = {};
        const response = await this.request({
            path: `/v1/chat/websearch`,
            method: 'GET',
            headers: headerParameters,
            query: queryParameters,
        }, initOverrides);
        return new runtime.JSONApiResponse(response, (jsonValue) => WebSearchResponseFromJSON(jsonValue));
    }
    /**
     * Run a live websearch on a set of queries, get back a fully structured and LLM-distilled response (in addition to the raw text if you need that as well). Your response includes as_string and as_dicts, where as_string is a prompt-optimized distillation of the information, done by an LLM. as_dicts contains all the details necessary to feed into other parts of your application.
     * Run a live websearch.
     *
     * @param requestParameters - same shape as for {@link liveWebSearchRaw}.
     * @param initOverrides - optional request overrides forwarded to `runtime.BaseAPI.request`.
     * @returns the parsed `WebSearchResponse`.
     * @throws {runtime.RequiredError} when `queries` is null or undefined.
     */
    async liveWebSearch(requestParameters, initOverrides) {
        const response = await this.liveWebSearchRaw(requestParameters, initOverrides);
        return await response.value();
    }
}