@baseai/core
The Web AI Framework's core - BaseAI.dev
import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
interface GitConfig {
enabled: boolean;
include: string[];
gitignore?: boolean;
deployedAt?: string;
embeddedAt?: string;
}
interface MemoryDocumentI {
name: string;
size: string;
content: string;
blob: Blob;
path: string;
}
interface Document {
meta?: (doc: MemoryDocumentI) => Record<string, string>;
}
interface Memory {
name: string;
description?: string;
git: GitConfig;
documents?: Document;
}
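/**
 * A minimal sketch of a memory config (exported as `MemoryI`; the name and glob
 * below are illustrative):
 *
 * ```ts
 * const docsMemory: MemoryI = {
 *   name: 'docs-memory',
 *   description: 'Markdown docs tracked via git.',
 *   git: {
 *     enabled: true,
 *     include: ['*.md'],
 *     gitignore: true,
 *   },
 * };
 * ```
 */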
type OpenAIModels = 'openai:gpt-4o' | 'openai:gpt-4o-2024-08-06' | 'openai:gpt-4o-mini' | 'openai:gpt-4-turbo' | 'openai:gpt-4-turbo-preview' | 'openai:gpt-4-0125-preview' | 'openai:gpt-4-1106-preview' | 'openai:gpt-4' | 'openai:gpt-4-0613' | 'openai:gpt-4-32k' | 'openai:gpt-3.5-turbo' | 'openai:gpt-3.5-turbo-0125' | 'openai:gpt-3.5-turbo-1106' | 'openai:gpt-3.5-turbo-16k';
type TogetherModels = 'together:meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'together:meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'together:meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'together:meta-llama/Llama-3-70b-chat-hf' | 'together:meta-llama/Llama-3-8b-chat-hf' | 'together:togethercomputer/Llama-2-7B-32K-Instruct' | 'together:meta-llama/Llama-2-13b-chat-hf' | 'together:meta-llama/Llama-2-70b-chat-hf' | 'together:google/gemma-7b-it' | 'together:google/gemma-2b-it' | 'together:mistralai/Mistral-7B-Instruct-v0.1' | 'together:mistralai/Mistral-7B-Instruct-v0.2' | 'together:mistralai/Mixtral-8x7B-Instruct-v0.1' | 'together:mistralai/Mixtral-8x22B-Instruct-v0.1' | 'together:databricks/dbrx-instruct' | 'together:meta-llama/Llama-3.3-70B-Instruct-Turbo';
type AnthropicModels = 'anthropic:claude-3-5-sonnet-latest' | 'anthropic:claude-3-5-sonnet-20240620' | 'anthropic:claude-3-opus-20240229' | 'anthropic:claude-3-sonnet-20240229' | 'anthropic:claude-3-haiku-20240307' | 'anthropic:claude-3-5-haiku-20241022';
type GroqModels = 'groq:llama-3.1-70b-versatile' | 'groq:llama-3.1-8b-instant' | 'groq:llama3-70b-8192' | 'groq:llama3-8b-8192' | 'groq:mixtral-8x7b-32768' | 'groq:gemma2-9b-it' | 'groq:gemma-7b-it' | 'groq:llama-3.3-70b-versatile';
type GoogleModels = 'google:gemini-1.5-pro-latest' | 'google:gemini-1.5-flash-latest' | 'google:gemini-1.5-flash-8b-latest' | 'google:gemini-pro';
type CohereModels = 'cohere:command-r' | 'cohere:command-r-plus';
type FireworksAIModels = 'fireworks:llama-v3p1-405b-instruct' | 'fireworks:llama-v3p1-8b-instruct' | 'fireworks:llama-v3p1-70b-instruct' | 'fireworks:llama-v3-70b-instruct' | 'fireworks:yi-large' | 'fireworks:llama-v3p3-70b-instruct';
type PerplexityModels = 'perplexity:llama-3.1-sonar-huge-128k-online' | 'perplexity:llama-3.1-sonar-large-128k-online' | 'perplexity:llama-3.1-sonar-small-128k-online' | 'perplexity:llama-3.1-sonar-large-128k-chat' | 'perplexity:llama-3.1-sonar-small-128k-chat';
type MistralAIModels = 'mistral:mistral-large-latest' | 'mistral:open-mistral-nemo' | 'mistral:codestral-latest';
type XAIModels = 'xai:grok-beta';
type OllamaModels = `ollama:${string}`;
type MessageRole = 'function' | 'assistant' | 'system' | 'user' | 'tool';
interface Function {
name: string;
arguments: string;
}
interface ToolCallResult {
id: string;
type: 'function';
function: Function;
}
interface Message {
role: MessageRole;
content: string | null;
name?: string;
tool_call_id?: string;
tool_calls?: ToolCallResult[];
}
interface ToolFunction {
name: string;
}
interface ToolChoiceFunction {
type: 'function';
function: ToolFunction;
}
type ToolChoice = 'auto' | 'required' | ToolChoiceFunction;
interface Tools {
type: 'function';
function: {
name: string;
description?: string;
parameters?: Record<string, any>;
};
}
type Model = OpenAIModels | TogetherModels | AnthropicModels | GroqModels | GoogleModels | CohereModels | FireworksAIModels | PerplexityModels | MistralAIModels | XAIModels | OllamaModels;
interface Pipe$1 {
apiKey?: string;
name: string;
description?: string;
status: 'public' | 'private';
model: Model;
stream?: boolean;
json?: boolean;
store?: boolean;
moderate?: boolean;
top_p: number;
max_tokens: number;
temperature: number;
presence_penalty: number;
frequency_penalty: number;
stop: string[];
tool_choice: ToolChoice;
parallel_tool_calls: boolean;
messages: Message[];
variables: any[];
tools: any[];
memory: {
name: string;
}[];
}
interface Tool {
run: (...args: any[]) => Promise<any> | any;
type: 'function';
function: {
name: string;
description?: string;
parameters?: Record<string, any>;
};
}
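/**
 * A sketch of a tool definition (exported as `ToolI`; the weather lookup and the
 * argument shape passed to `run` are illustrative assumptions):
 *
 * ```ts
 * const getWeather: ToolI = {
 *   type: 'function',
 *   // Assumes the runtime invokes `run` with the parsed tool arguments.
 *   run: async ({ location }: { location: string }) => `Sunny in ${location}`,
 *   function: {
 *     name: 'getWeather',
 *     description: 'Get the current weather for a location.',
 *     parameters: {
 *       type: 'object',
 *       properties: { location: { type: 'string' } },
 *       required: ['location'],
 *     },
 *   },
 * };
 * ```
 */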
interface Variable {
name: string;
value: string;
}
interface RunOptions {
messages?: Message[];
variables?: Variable[];
threadId?: string;
rawResponse?: boolean;
runTools?: boolean;
tools?: Tools[];
name?: string;
apiKey?: string;
llmKey?: string;
}
interface RunOptionsStream extends RunOptions {
stream: boolean;
}
interface Usage {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
}
interface RunResponse {
completion: string;
threadId?: string;
id: string;
object: string;
created: number;
model: string;
choices: ChoiceGenerate[];
usage: Usage;
system_fingerprint: string | null;
rawResponse?: {
headers: Record<string, string>;
};
}
interface RunResponseStream {
stream: ReadableStream<any>;
threadId: string | null;
rawResponse?: {
headers: Record<string, string>;
};
}
interface PipeOptions extends Pipe$1 {
maxCalls?: number;
prod?: boolean;
}
interface ChoiceGenerate {
index: number;
message: Message;
logprobs: boolean | null;
finish_reason: string;
}
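/**
 * Client for configuring and running a pipe.
 *
 * A construction sketch (the option values below are illustrative, not package
 * defaults, and the environment variable name is an assumption):
 *
 * ```ts
 * import { Pipe } from '@baseai/core';
 *
 * const pipe = new Pipe({
 *   apiKey: process.env.LANGBASE_API_KEY!,
 *   name: 'summary-agent',
 *   status: 'private',
 *   model: 'openai:gpt-4o-mini',
 *   top_p: 1,
 *   max_tokens: 1000,
 *   temperature: 0.7,
 *   presence_penalty: 0,
 *   frequency_penalty: 0,
 *   stop: [],
 *   tool_choice: 'auto',
 *   parallel_tool_calls: true,
 *   messages: [{ role: 'system', content: 'You are a concise summarizer.' }],
 *   variables: [],
 *   tools: [],
 *   memory: [],
 * });
 *
 * const response = await pipe.run({
 *   messages: [{ role: 'user', content: 'Summarize BaseAI in one line.' }],
 * });
 * console.log(response.completion);
 * ```
 */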
declare class Pipe {
private request;
private pipe;
private tools;
private maxCalls;
private hasTools;
private prod;
private baseUrl;
private entityApiKey?;
constructor(options: PipeOptions);
private getToolsFromPipe;
private runTools;
private hasNoToolCalls;
private getMessagesToSend;
private isStreamRequested;
private warnIfToolsWithStream;
private handleStreamResponse;
run(options: RunOptionsStream): Promise<RunResponseStream>;
run(options: RunOptions): Promise<RunResponse>;
private createRequest;
}
/**
* Generates text using the provided options.
*
* @param options - The options for generating text.
 * @returns A promise that resolves to a `RunResponse` containing the generated completion.
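 *
 * A minimal usage sketch (assumes a `pipe` constructed as in the `Pipe` example
 * above):
 *
 * ```ts
 * const { completion } = await generateText({
 *   pipe,
 *   messages: [{ role: 'user', content: 'Who created you?' }],
 * });
 * console.log(completion);
 * ```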
*/
declare const generateText: (options: RunOptions & {
pipe: Pipe;
}) => Promise<RunResponse>;
/**
* Streams text using the provided options.
*
* @param options - The options for streaming text.
 * @returns A promise that resolves to a `RunResponseStream` with the readable stream and thread ID.
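 *
 * A minimal sketch (assumes the same `pipe` as above; `getRunner`, declared
 * below, wraps the raw stream so it can be iterated):
 *
 * ```ts
 * const { stream } = await streamText({
 *   pipe,
 *   messages: [{ role: 'user', content: 'Hello!' }],
 * });
 * for await (const chunk of getRunner(stream)) {
 *   process.stdout.write(getTextPart(chunk));
 * }
 * ```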
*/
declare const streamText: (options: RunOptions & {
pipe: Pipe;
}) => Promise<RunResponseStream>;
interface ContentChunk {
type: 'content';
content: string;
}
interface ToolCallChunk {
type: 'toolCall';
toolCall: ToolCallResult;
}
interface ChoiceStream {
index: number;
delta: Delta;
logprobs: boolean | null;
finish_reason: string;
}
interface Delta {
role?: MessageRole;
content?: string;
tool_calls?: ToolCallResult[];
}
interface UnknownChunk {
type: 'unknown';
rawChunk: ChunkStream;
}
interface ChunkStream {
id: string;
object: string;
created: number;
model: string;
choices: ChoiceStream[];
}
interface Chunk {
type: 'content' | 'toolCall' | 'unknown';
content?: string;
toolCall?: ToolCallResult;
rawChunk?: ChunkStream;
}
/**
* Processes a chunk and returns a Chunk object.
*
* ```ts
 * for await (const chunk of runner) {
 *   const processedChunk = processChunk({ rawChunk: chunk });
 *   if (isContent(processedChunk)) {
 *     process.stdout.write(processedChunk.content);
 *   }
 * }
* ```
*
* @param rawChunk - The raw chunk to process.
* @returns The processed Chunk object.
*/
declare const processChunk: ({ rawChunk }: {
rawChunk: any;
}) => Chunk;
/**
* Checks if the given chunk is a ContentChunk.
*
* @param chunk - The chunk to check.
* @returns True if the chunk is a ContentChunk, false otherwise.
*/
declare const isContent: (chunk: Chunk) => chunk is ContentChunk;
/**
* Determines if the given chunk is a ToolCallChunk.
*
* @param chunk - The chunk to be evaluated.
* @returns True if the chunk is of type 'toolCall', otherwise false.
*/
declare const isToolCall: (chunk: Chunk) => chunk is ToolCallChunk;
/**
* Checks if the given chunk is of type 'unknown'.
*
* @param chunk - The chunk to be checked.
* @returns True if the chunk is of type 'unknown', false otherwise.
*/
declare const isUnknown: (chunk: Chunk) => chunk is UnknownChunk;
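/**
 * A sketch combining the three guards above (assumes `runner` comes from
 * `getRunner`, declared below):
 *
 * ```ts
 * for await (const rawChunk of runner) {
 *   const chunk = processChunk({ rawChunk });
 *   if (isContent(chunk)) process.stdout.write(chunk.content);
 *   else if (isToolCall(chunk)) console.log('Tool call:', chunk.toolCall.function.name);
 *   else if (isUnknown(chunk)) console.warn('Unhandled chunk:', chunk.rawChunk);
 * }
 * ```
 */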
/**
* Retrieves the text content from a given ChunkStream.
*
* @param chunk - The ChunkStream object.
* @returns The text content from the ChunkStream.
*/
declare const getTextContent: (chunk: any) => string;
/**
* Retrieves the text delta from a given chunk.
*
* @param chunk - The chunk stream to extract the text delta from.
* @returns The text delta content, or an empty string if it is not available.
*/
declare const getTextDelta: (chunk: ChunkStream) => string;
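/**
 * A sketch of incremental printing with `getTextDelta` (assumes `runner` is a
 * Runner over a streamed response):
 *
 * ```ts
 * for await (const chunk of runner) {
 *   process.stdout.write(getTextDelta(chunk));
 * }
 * ```
 */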
/**
 * Writes the streamed content of a Runner to the standard output.
 *
 * @param runner - The Runner whose content chunks are printed.
 * @returns A Promise that resolves when the printing is complete.
*/
declare const printStreamToStdout: (runner: Runner) => Promise<void>;
interface Runner extends ChatCompletionStream<null> {
}
/**
* Converts a ReadableStream into a Runner.
*
* @param readableStream - The ReadableStream to convert.
* @returns The converted Runner.
*/
declare const fromReadableStream: (readableStream: ReadableStream) => Runner;
/**
* Returns a runner for the given readable stream.
*
* @param readableStream - The readable stream to create a runner for.
* @returns A runner for the given readable stream.
*/
declare const getRunner: (readableStream: ReadableStream) => Runner;
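/**
 * A sketch tying the stream helpers together (assumes `stream` came from
 * `streamText` as above; note a ReadableStream can only be consumed once):
 *
 * ```ts
 * const runner = fromReadableStream(stream);
 * await printStreamToStdout(runner);
 * ```
 */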
/**
* Retrieves the text part from a given ChunkStream.
*
* @param chunk - The ChunkStream object.
* @returns The text content of the first choice's delta, or an empty string if it doesn't exist.
*/
declare const getTextPart: (chunk: ChunkStream) => string;
/**
* Handles the response stream from a given `Response` object.
*
* @param {Object} params - The parameters for handling the response stream.
* @param {Response} params.response - The API response to handle.
* @param {boolean} params.rawResponse - Optional flag to include raw response headers.
*
* @returns {Object} An object containing the processed stream, thread ID, and optionally raw response headers.
* @returns {ReadableStream<any>} return.stream - The readable stream created from the response.
* @returns {string | null} return.threadId - The thread ID extracted from the response headers.
* @returns {Object} [return.rawResponse] - Optional raw response headers.
* @returns {Record<string, string>} return.rawResponse.headers - The headers from the raw response.
*/
declare function handleResponseStream({ response, rawResponse, }: {
response: Response;
rawResponse?: boolean;
}): {
stream: any;
threadId: string | null;
rawResponse?: {
headers: Record<string, string>;
};
};
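/**
 * A sketch of consuming a raw fetch `Response` (the URL, headers, and body shape
 * are illustrative placeholders, not this package's actual endpoint):
 *
 * ```ts
 * const response = await fetch('https://example.com/v1/pipes/run', {
 *   method: 'POST',
 *   headers: { 'Content-Type': 'application/json' },
 *   body: JSON.stringify({ stream: true, messages: [{ role: 'user', content: 'Hi' }] }),
 * });
 * const { stream, threadId } = handleResponseStream({ response, rawResponse: true });
 * ```
 */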
/**
* Retrieves tool calls from a given readable stream.
*
* @param stream - The readable stream from which to extract tool calls.
* @returns A promise that resolves to an array of `ToolCall` objects.
*/
declare function getToolsFromStream(stream: ReadableStream<any>): Promise<ToolCallResult[]>;
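/**
 * A sketch of collecting tool calls from a streamed run (assumes a `pipe`
 * configured with tools, as in the `ToolI` example above):
 *
 * ```ts
 * const { stream } = await streamText({
 *   pipe,
 *   messages: [{ role: 'user', content: 'What is the weather in Rome?' }],
 * });
 * const toolCalls = await getToolsFromStream(stream);
 * for (const call of toolCalls) {
 *   console.log(call.function.name, JSON.parse(call.function.arguments));
 * }
 * ```
 */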
export { type Chunk, type ChunkStream, type Memory as MemoryI, type Message, type MessageRole, Pipe, type Pipe$1 as PipeI, type PipeOptions, type RunOptions, type RunOptionsStream, type RunResponse, type RunResponseStream, type Runner, type Tool as ToolI, type Usage, type Variable, fromReadableStream, generateText, getRunner, getTextContent, getTextDelta, getTextPart, getToolsFromStream, handleResponseStream, isContent, isToolCall, isUnknown, printStreamToStdout, processChunk, streamText };