/**
 * palm-api: A PaLM API for JS & TS.
 */
import { Format, GenerateTextConfig, AskConfig, ResponseByFormat, GenerateTextResponse, AskResponse, EmbedTextConfig, CreateChatConfig, ChatAskConfig, Message } from "./google-ai-types";
/**
* PaLM API.
*
* @class PaLM
* @typedef {PaLM}
*/
declare class PaLM {
#private;
/**
* Available response formats.
*
* @public
* @static
* @type {{ readonly JSON: "json"; readonly MD: "markdown"; }}
*/
static readonly FORMATS: {
readonly JSON: "json";
readonly MD: "markdown";
};
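// Usage sketch (illustrative, not part of the declarations): selecting a
// response format via the FORMATS constants. The `format` config key is an
// assumption inferred from ResponseByFormat; it is not spelled out here.
//
//   const answer = await palm.ask("List three colors.", {
//     format: PaLM.FORMATS.JSON,
//   });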
/**
* API key.
*
* @private
* @type {string}
*/
private key;
/**
* @constructor
* @public
* @param {string} key
* @param {{ fetch?: typeof fetch }} [rawConfig={}]
*/
constructor(key: string, rawConfig?: {
fetch?: typeof fetch;
});
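// Usage sketch (illustrative): constructing a client. The environment
// variable name is an assumption for demonstration; the optional `fetch`
// override matches the constructor signature above.
//
//   import PaLM from "palm-api";
//   const palm = new PaLM(process.env.PALM_API_KEY!, { fetch });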
/**
* Uses `generateText`-capable models to generate text.
*
* @public
* @async
* @template {Format} [F='markdown'] response format.
* @param {string} message
* @param {Partial<GenerateTextConfig<F>>} [rawConfig={}]
* @returns {Promise<ResponseByFormat<GenerateTextResponse>[F]>}
*/
generateText<F extends Format = "markdown">(message: string, rawConfig?: Partial<GenerateTextConfig<F>>): Promise<ResponseByFormat<GenerateTextResponse>[F]>;
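// Usage sketch (illustrative): plain text generation with the default
// markdown format; the response type narrows via ResponseByFormat.
//
//   const poem = await palm.generateText("Write a haiku about TypeScript.");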
/**
* Uses `generateMessage`-capable models to provide a high-quality LLM experience, with context, examples, and more.
*
* @public
* @async
* @template {Format} [F='markdown'] response format.
* @param {string} message
* @param {Partial<AskConfig<F>>} [rawConfig={}]
* @returns {Promise<ResponseByFormat<AskResponse>[F]>}
*/
ask<F extends Format = "markdown">(message: string, rawConfig?: Partial<AskConfig<F>>): Promise<ResponseByFormat<AskResponse>[F]>;
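// Usage sketch (illustrative): a one-off question. Config fields beyond the
// format generic live in AskConfig and are omitted here.
//
//   const reply = await palm.ask("What is the capital of France?");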
/**
* Uses `embedText`-capable models to embed your text into a float vector that you can use for various complex tasks.
*
* @public
* @async
* @param {string} message
* @param {Partial<EmbedTextConfig>} [rawConfig={}]
* @returns {Promise<number[]>}
*/
embed(message: string, rawConfig?: Partial<EmbedTextConfig>): Promise<number[]>;
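// Usage sketch (illustrative): the returned number[] is an embedding vector,
// comparable with cosine similarity, for example.
//
//   const vec = await palm.embed("Hello, world!");
//   console.log(vec.length); // embedding dimensionality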
/**
* Uses `generateMessage`-capable models to create a chat interface that's simple, fast, and easy to use.
*
* @public
* @param {Partial<CreateChatConfig>} [rawConfig={}]
* @returns {Chat}
*/
createChat(rawConfig?: Partial<CreateChatConfig>): Chat;
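// Usage sketch (illustrative): a stateful conversation; Chat.ask threads the
// message history through automatically.
//
//   const chat = palm.createChat();
//   await chat.ask("My name is Ada.");
//   const reply = await chat.ask("What is my name?");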
}
/**
* Chat wrapper interface.
*
* @class Chat
* @typedef {Chat}
*/
declare class Chat {
/**
* `PaLM` instance.
*
* @private
* @type {PaLM}
*/
private PaLM;
/**
* Chat creation configuration.
*
* @private
* @type {CreateChatConfig}
*/
private config;
/**
* Message history.
*
* @private
* @type {Message[]}
*/
private messages;
/**
* @constructor
* @param {PaLM} PaLM
* @param {Partial<CreateChatConfig>} [rawConfig={}]
*/
constructor(PaLM: PaLM, rawConfig?: Partial<CreateChatConfig>);
/**
* Same as {@link PaLM.ask()} but remembers previous messages and responses to enable continued conversations.
*
* @async
* @template {Format} [F='markdown'] response format.
* @param {string} message
* @param {Partial<ChatAskConfig<F>>} [rawConfig={}]
* @returns {Promise<ResponseByFormat<AskResponse>[F]>}
*/
ask<F extends Format = "markdown">(message: string, rawConfig?: Partial<ChatAskConfig<F>>): Promise<ResponseByFormat<AskResponse>[F]>;
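// Usage sketch (illustrative): requesting JSON from a chat turn. As above,
// the `format` config key is an assumption inferred from ResponseByFormat.
//
//   const json = await chat.ask("Summarize our chat as JSON.", {
//     format: PaLM.FORMATS.JSON,
//   });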
/**
* Exports the message history.
*
* @returns {Message[]}
*/
export(): Message[];
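// Usage sketch (illustrative): persisting a conversation. Whether
// CreateChatConfig accepts exported messages to resume a chat is an
// assumption, not shown in these declarations.
//
//   const history = chat.export();
//   // later, hypothetically: palm.createChat({ messages: history })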
}
export default PaLM;