@emergentmethods/asknews-typescript-sdk
Version:
Typescript SDK for AskNews API
238 lines (201 loc) • 9.89 kB
text/typescript
/* tslint:disable */
/* eslint-disable */
/**
* AskNews API
* AskNews API
*
* The version of the OpenAPI document: 0.21.1
* Contact: contact@emergentmethods.ai
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import * as runtime from '../runtime';
import type {
AbcAPIErrorModel21,
AbcAPIErrorModel22,
AbcAPIErrorModel23,
AbcAPIErrorModel24,
AbcAPIErrorModel25,
AbcAPIErrorModel26,
AbcAPIErrorModel27,
AbcAPIErrorModel28,
AsknewsApiErrorsAPIErrorModel,
CreateChatCompletionRequest,
CreateChatCompletionResponse1,
CreateChatCompletionResponseStream1,
CreateDeepNewsRequest,
CreateDeepNewsResponse1,
CreateDeepNewsResponseStreamChunk1,
HTTPValidationError,
ListModelResponse,
ValidationErrorModel,
} from '../models/index';
import {
AbcAPIErrorModel21FromJSON,
AbcAPIErrorModel21ToJSON,
AbcAPIErrorModel22FromJSON,
AbcAPIErrorModel22ToJSON,
AbcAPIErrorModel23FromJSON,
AbcAPIErrorModel23ToJSON,
AbcAPIErrorModel24FromJSON,
AbcAPIErrorModel24ToJSON,
AbcAPIErrorModel25FromJSON,
AbcAPIErrorModel25ToJSON,
AbcAPIErrorModel26FromJSON,
AbcAPIErrorModel26ToJSON,
AbcAPIErrorModel27FromJSON,
AbcAPIErrorModel27ToJSON,
AbcAPIErrorModel28FromJSON,
AbcAPIErrorModel28ToJSON,
AsknewsApiErrorsAPIErrorModelFromJSON,
AsknewsApiErrorsAPIErrorModelToJSON,
CreateChatCompletionRequestFromJSON,
CreateChatCompletionRequestToJSON,
CreateChatCompletionResponse1FromJSON,
CreateChatCompletionResponse1ToJSON,
CreateChatCompletionResponseStream1FromJSON,
CreateChatCompletionResponseStream1ToJSON,
CreateDeepNewsRequestFromJSON,
CreateDeepNewsRequestToJSON,
CreateDeepNewsResponse1FromJSON,
CreateDeepNewsResponse1ToJSON,
CreateDeepNewsResponseStreamChunk1FromJSON,
CreateDeepNewsResponseStreamChunk1ToJSON,
HTTPValidationErrorFromJSON,
HTTPValidationErrorToJSON,
ListModelResponseFromJSON,
ListModelResponseToJSON,
ValidationErrorModelFromJSON,
ValidationErrorModelToJSON,
} from '../models/index';
/**
 * Request parameters for {@link ChatApi.deepNews} / {@link ChatApi.deepNewsRaw}.
 */
export interface DeepNewsRequest {
// Required request body; deepNewsRaw throws RequiredError when null/undefined.
createDeepNewsRequest: CreateDeepNewsRequest;
}
/**
 * Request parameters for {@link ChatApi.getChatCompletions} / {@link ChatApi.getChatCompletionsRaw}.
 */
export interface GetChatCompletionsRequest {
// Required request body; getChatCompletionsRaw throws RequiredError when null/undefined.
createChatCompletionRequest: CreateChatCompletionRequest;
}
/**
 * Request parameters for {@link ChatApi.getHeadlineQuestions} / {@link ChatApi.getHeadlineQuestionsRaw}.
 */
export interface GetHeadlineQuestionsRequest {
// Optional list of queries; sent as the `queries` query parameter when provided.
queries?: Array<string>;
}
/**
*
*/
/**
 * AskNews chat endpoints: deep news research, OpenAI-compatible chat
 * completions, example headline questions, and chat-model listing.
 */
export class ChatApi extends runtime.BaseAPI {
    /**
     * Deep research into real-time news, archive news, and Google.
     *
     * @param requestParameters - Carries the required request body.
     * @param initOverrides - Optional fetch `RequestInit` overrides.
     * @returns A {@link runtime.StreamApiResponse} when the request body sets
     *          `stream: true`, otherwise a JSON response parsed into
     *          {@link CreateDeepNewsResponse1}.
     * @throws {runtime.RequiredError} When `createDeepNewsRequest` is null or undefined.
     */
    async deepNewsRaw(requestParameters: DeepNewsRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<runtime.ApiResponse<CreateDeepNewsResponse1> | runtime.StreamApiResponse> {
        if (requestParameters['createDeepNewsRequest'] == null) {
            throw new runtime.RequiredError(
                'createDeepNewsRequest',
                'Required parameter "createDeepNewsRequest" was null or undefined when calling deepNews().'
            );
        }
        const queryParameters: any = {};
        const headerParameters: runtime.HTTPHeaders = {};
        headerParameters['Content-Type'] = 'application/json';
        const response = await this.request({
            path: `/v1/chat/deepnews`,
            method: 'POST',
            headers: headerParameters,
            query: queryParameters,
            body: CreateDeepNewsRequestToJSON(requestParameters['createDeepNewsRequest']),
        }, initOverrides);
        // The body is known non-null here (guard above threw otherwise), so only
        // the stream flag itself needs checking. When streaming was requested,
        // hand the raw response back as a stream instead of parsing JSON.
        if ('stream' in requestParameters['createDeepNewsRequest'] && requestParameters['createDeepNewsRequest']['stream'] === true) {
            return new runtime.StreamApiResponse(response);
        }
        return new runtime.JSONApiResponse(response, (jsonValue) => CreateDeepNewsResponse1FromJSON(jsonValue));
    }
    /**
     * Deep research into real-time news, archive news, and Google.
     *
     * Convenience wrapper over {@link deepNewsRaw} that unwraps the response
     * value: a parsed object for non-streaming calls, a `ReadableStream` when
     * `stream: true` was set.
     */
    async deepNews(requestParameters: DeepNewsRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<CreateDeepNewsResponse1 | ReadableStream<any>> {
        const response = await this.deepNewsRaw(requestParameters, initOverrides);
        return await response.value();
    }
    /**
     * Get the chat completions for a given user message. This endpoint follows the OpenAI API spec. It includes a couple extra params, which include: - **journalist_mode**: Whether to activate an auto prompt that is more keen on AP styling, citations, and fair reporting. Setting to false, you get a vanilla LLM with the news pre added to the system prompt. No other prompting. - **inline_citations**: Decides how you want the bot to cite sources. It can use brackets, or it can also include the markdown with URL automatically. - **asknews_watermark**: Whether to include the AskNews watermark in the response.
     * Get chat completions from a news-infused AI assistant
     *
     * @param requestParameters - Carries the required request body.
     * @param initOverrides - Optional fetch `RequestInit` overrides.
     * @returns A {@link runtime.StreamApiResponse} when the request body sets
     *          `stream: true`, otherwise a JSON response parsed into
     *          {@link CreateChatCompletionResponse1}.
     * @throws {runtime.RequiredError} When `createChatCompletionRequest` is null or undefined.
     */
    async getChatCompletionsRaw(requestParameters: GetChatCompletionsRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<runtime.ApiResponse<CreateChatCompletionResponse1> | runtime.StreamApiResponse> {
        if (requestParameters['createChatCompletionRequest'] == null) {
            throw new runtime.RequiredError(
                'createChatCompletionRequest',
                'Required parameter "createChatCompletionRequest" was null or undefined when calling getChatCompletions().'
            );
        }
        const queryParameters: any = {};
        const headerParameters: runtime.HTTPHeaders = {};
        headerParameters['Content-Type'] = 'application/json';
        const response = await this.request({
            path: `/v1/openai/chat/completions`,
            method: 'POST',
            headers: headerParameters,
            query: queryParameters,
            body: CreateChatCompletionRequestToJSON(requestParameters['createChatCompletionRequest']),
        }, initOverrides);
        // Body is known non-null here (guard above threw otherwise); only the
        // stream flag decides between streaming and JSON handling.
        if ('stream' in requestParameters['createChatCompletionRequest'] && requestParameters['createChatCompletionRequest']['stream'] === true) {
            return new runtime.StreamApiResponse(response);
        }
        return new runtime.JSONApiResponse(response, (jsonValue) => CreateChatCompletionResponse1FromJSON(jsonValue));
    }
    /**
     * Get the chat completions for a given user message. This endpoint follows the OpenAI API spec. It includes a couple extra params, which include: - **journalist_mode**: Whether to activate an auto prompt that is more keen on AP styling, citations, and fair reporting. Setting to false, you get a vanilla LLM with the news pre added to the system prompt. No other prompting. - **inline_citations**: Decides how you want the bot to cite sources. It can use brackets, or it can also include the markdown with URL automatically. - **asknews_watermark**: Whether to include the AskNews watermark in the response.
     * Get chat completions from a news-infused AI assistant
     *
     * Convenience wrapper over {@link getChatCompletionsRaw} that unwraps the
     * response value: a parsed object for non-streaming calls, a
     * `ReadableStream` when `stream: true` was set.
     */
    async getChatCompletions(requestParameters: GetChatCompletionsRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<CreateChatCompletionResponse1 | ReadableStream<any>> {
        const response = await this.getChatCompletionsRaw(requestParameters, initOverrides);
        return await response.value();
    }
    /**
     * Get the headline example questions related to the given queries.
     * Get example headline questions
     *
     * @param requestParameters - Optional `queries` to filter by; defaults to
     *        `{}` for consistency with {@link getHeadlineQuestions}.
     * @param initOverrides - Optional fetch `RequestInit` overrides.
     * @returns A JSON response mapping each query to its example questions.
     */
    async getHeadlineQuestionsRaw(requestParameters: GetHeadlineQuestionsRequest = {}, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<runtime.ApiResponse<{ [key: string]: Array<string>; }> > {
        const queryParameters: any = {};
        if (requestParameters['queries'] != null) {
            queryParameters['queries'] = requestParameters['queries'];
        }
        const headerParameters: runtime.HTTPHeaders = {};
        const response = await this.request({
            path: `/v1/chat/questions`,
            method: 'GET',
            headers: headerParameters,
            query: queryParameters,
        }, initOverrides);
        // No model transform exists for this endpoint; the payload is used as-is.
        return new runtime.JSONApiResponse<any>(response);
    }
    /**
     * Get the headline example questions related to the given queries.
     * Get example headline questions
     */
    async getHeadlineQuestions(requestParameters: GetHeadlineQuestionsRequest = {}, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<{ [key: string]: Array<string>; }> {
        const response = await this.getHeadlineQuestionsRaw(requestParameters, initOverrides);
        return await response.value();
    }
    /**
     * List the available chat models.
     * List available chat models
     *
     * @param initOverrides - Optional fetch `RequestInit` overrides.
     * @returns A JSON response parsed into {@link ListModelResponse}.
     */
    async listChatModelsRaw(initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<runtime.ApiResponse<ListModelResponse> > {
        const queryParameters: any = {};
        const headerParameters: runtime.HTTPHeaders = {};
        const response = await this.request({
            path: `/v1/openai/models`,
            method: 'GET',
            headers: headerParameters,
            query: queryParameters,
        }, initOverrides);
        return new runtime.JSONApiResponse(response, (jsonValue) => ListModelResponseFromJSON(jsonValue));
    }
    /**
     * List the available chat models.
     * List available chat models
     */
    async listChatModels(initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<ListModelResponse> {
        const response = await this.listChatModelsRaw(initOverrides);
        return await response.value();
    }
}