gpt3rocket
Version:
Little helper utility for priming + transacting w/ GPT3 api
127 lines (126 loc) • 4.93 kB
TypeScript
/**
 * One completion candidate inside an {@link APIResponse}.
 */
export interface APIResponseChoice {
  /** The generated completion text. */
  text: string;
  /** Position of this choice within the response's `choices` array. */
  index: number;
  /**
   * NOTE(review): typed `number` here, but the OpenAI completions API
   * returns an object of token log-probabilities for this field — verify
   * against an actual API response before relying on this type.
   */
  logprobs?: number;
  /** Reason the API stopped generating, as reported by the API. */
  finish_reason: string;
}
/**
 * Shape of a completion response returned by the GPT-3 API.
 */
export interface APIResponse {
  /** Unique identifier of this completion. */
  id: string;
  /** Object type tag returned by the API. */
  object: string;
  /** Creation timestamp. NOTE(review): presumably Unix seconds — confirm. */
  created: number;
  /** Engine/model that produced the completion. */
  model: string;
  /** One entry per requested completion (see `APIFlags.n`). */
  choices: APIResponseChoice[];
}
/**
 * A list of input/output string pairs used to prime the agent.
 * NOTE(review): despite the singular name this is a *list* of pairs; a
 * single pair is `[string, string]` (compare `Samples` below).
 */
export declare type Sample = string[][];
/**
 * Priming samples: either a single `[input, output]` pair, or a list of
 * string lists (typically `[input, output]` pairs).
 *
 * Structurally equivalent to the previous
 * `[string, string] | [string, string][] | [] | string[][]` — the
 * `[string, string][]` and `[]` members were redundant because both are
 * assignable to `string[][]`, so the union accepts and produces exactly
 * the same values as before.
 */
export declare type Samples = [string, string] | string[][];
/**
 * Flags forwarded to the GPT-3 completions API.
 */
export interface APIFlags {
  /** The engine ID; defaults to davinci (ada, babbage, curie, davinci). */
  engine?: string;
  /** One or more prompts to generate from. */
  prompt?: string;
  /** How many tokens to complete to; fewer may be returned if a stop sequence is hit. */
  max_tokens?: number;
  /** Sampling temperature; higher values take more risks. Use this or `top_p`, not both. */
  temperature?: number;
  /** Nucleus sampling: only tokens in the top `top_p` probability mass are considered. */
  top_p?: number;
  /** How many choices to create for each prompt. */
  n?: number;
  /** Stream back partial progress as server-sent events, terminated by `data: [DONE]`. */
  stream?: boolean;
  /** Include log probabilities on the `logprobs` most likely tokens. */
  logprobs?: number;
  /** Sequence(s) where the API stops generating; not included in the returned text. */
  stop?: string;
  /** Any additional flags are passed through to the API as-is. */
  [key: string]: any;
}
/**
 * Library-level configuration (distinct from `APIFlags`, which is sent to
 * the API itself).
 */
export interface APIConfig {
  /**
   * NOTE(review): presumably returns the full API response instead of just
   * the completion text when true — confirm against the implementation.
   */
  full_response?: boolean;
  /** Override for the API endpoint URL. */
  endpoint?: string;
}
/**
 * Constructor options for {@link GPT3Rocket}.
 */
export interface RootConfig {
  /** Priming samples (input/output pairs). */
  samples?: Samples;
  /** Primer string prepended to the top of each message. */
  prefix?: string;
  /** API credential (required). */
  credential: string;
  /** Library-level configuration. */
  APIConfig?: APIConfig;
  /** Default flags forwarded to the API with each request. */
  APIFlags?: APIFlags;
  /**
   * Optional function to adjust how the prefix & samples are structured
   * when sent to the API. NOTE(review): typed `any`; the expected shape
   * appears to match the `changeTransformer` parameter — confirm.
   */
  transform?: any;
  /** Label used for sample inputs in the generated prompt (e.g. "input"). */
  inputString?: string;
  /** Label used for sample outputs in the generated prompt (e.g. "output"). */
  outputString?: string;
  /** Enable debug logging. */
  debug?: boolean;
}
/**
* ## Opts: Samples & prefix
* Samples & a prefix string will prime your agent
*
* ### opts.samples (optional)
*
* *array of Samples*
*
* ```ts
 * const samples = [['marco', 'polo'], ['marrrrrccccoo', 'pollllooo']];
* ```
* ### opts.prefix (optional)
* String to prepend to top of message as "primer"
*
* *string*
*
*```ts
 * const prefix = 'The following is an exchange between the user and an intelligent agent. The agent is friendly, prompt, and wants to help the user.';
* ```
*
## Transform (optional)
* An optional function to adjust how the prefix & samples are structured when sent to API
*
* Receives samples, prefix, inputString, outputString
* Without a custom function, a template will look like the following
*
* ```
* Prefix phrase ____
* input: aaa
* output: bbb
* input: ${user_prompt_goes_here}
*```
*
*
* ```ts
 * const transform = (({samples, prefix, inputString, outputString}) => {
* const decoratedSamples = samples.map((example, idx) => {
* if (!(idx % 2)) {
* return `${inputString}:${example}`;
* } else {
* return `${outputString}:${example}`;
* }
* });
*
* return `\n${prefix}\n${decoratedSamples.join("\n")}`;
*
* })
*
* ```
*
 * ## APIFlags
* ```
 * engine?:string; // The engine ID, defaults to davinci (ada, babbage, curie, davinci)
* prompt?:string; //One or more prompts to generate from. Can be a string, list of strings, a list of integers (i.e. a single prompt encoded as tokens), or list of lists of integers (i.e. many prompts encoded as integers).
* max_tokens?:number; //How many tokens to complete to. Can return fewer if a stop sequence is hit.
* temperature?:number; //What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend using this or top_p but not both.
* top_p?:number; //An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend using this or temperature but not both.
* n?:number; //How many choices to create for each prompt.
* stream?:boolean; //Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
 * logprobs?:number; //Include the log probabilities on the logprobs most likely tokens. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
* stop?:string; //One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. *
* ```
*/
/**
 * Helper for priming and transacting with the GPT-3 API.
 *
 * Holds priming samples, a prefix, and default API flags/config, and
 * assembles the final prompt through a replaceable transformer function.
 */
export declare class GPT3Rocket {
  /** Effective configuration, seeded from the constructor argument. */
  config: RootConfig;
  constructor(configRef: RootConfig);
  /**
   * Builds the query payload from `prompt` plus the given (or configured)
   * samples and prefix.
   */
  buildQuery(prompt: string, samples?: Samples, prefix?: string): any;
  /**
   * Sends `prompt` to the API. Per-call `samples`/`prefix`/`APIFlags`/
   * `APIConfig` take precedence over the instance defaults.
   * NOTE(review): resolved value presumably depends on
   * `APIConfig.full_response` — confirm in the implementation.
   */
  ask(prompt: string, samples?: Samples, prefix?: string, APIFlags?: APIFlags, APIConfig?: APIConfig): Promise<any>;
  /** Appends one `[input, output]` priming sample pair. */
  add(sample: [string, string]): void;
  /** Replaces the primer prefix string. */
  addPrefix(prefix: string): void;
  /** Swaps in a custom transformer used to assemble the final prompt. */
  changeTransformer(transformerFunction: (prompt: string, samples: Samples, prefix: string, inputString: string, outputString: string) => string): void;
  /** Restores the default transformer (`_transformer`). */
  resetTransformer(): void;
  /** NOTE(review): presumably clears both samples and prefix — confirm. */
  clear(): void;
  /** Removes all priming samples. */
  clearSamples(): void;
  /** Removes the primer prefix. */
  clearPrefix(): void;
  /** Replaces the stored API credential. */
  updateCredential(credential: string): void;
  /** Internal debug logger. NOTE(review): presumably gated on `config.debug`. */
  __debug(...payload: any): void;
  /** Default transformer: builds the prompt text sent to the API. */
  _transformer(prompt: string, samples: Samples, prefix: string, inputString: string, outputString: string): string;
}
/**
 * ENDPOINT
 *
 * Factory that takes a {@link RootConfig} and returns an async
 * `(req, res, next)` request handler.
 * NOTE(review): the handler shape is inferred from the parameter names
 * (`req`, `res`, `next` are all `any`) — confirm the target framework
 * (e.g. Express/Connect) in the implementation.
 */
export declare const gpt3Endpoint: (config: RootConfig) => (req: any, res: any, next: any) => Promise<any>;