@helicone/helpers
A Node.js wrapper for common Helicone functionality, including manual logging and prompt management
import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions';
import { ValidationError } from '@helicone-package/prompts/types';
type IHeliconeManualLogger = {
apiKey: string;
headers?: Record<string, string>;
loggingEndpoint?: string;
};
type ILogRequest = {
model: string;
[key: string]: any;
};
interface HeliconeEventTool {
_type: "tool";
toolName: string;
input: any;
[key: string]: any;
}
interface HeliconeEventVectorDB {
_type: "vector_db";
operation: "search" | "insert" | "delete" | "update";
text?: string;
vector?: number[];
topK?: number;
filter?: object;
databaseName?: string;
[key: string]: any;
}
type HeliconeCustomEventRequest = HeliconeEventTool | HeliconeEventVectorDB;
type HeliconeLogRequest = ILogRequest | HeliconeCustomEventRequest;
type Stream<T> = AsyncIterable<T> & {
tee(): [Stream<T>, Stream<T>];
toReadableStream(): ReadableStream<T>;
};
/**
* HeliconeLogBuilder provides a simplified way to handle streaming LLM responses
* with better error handling and async support.
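*
* @example
* A minimal sketch of the builder lifecycle; `helicone`, `requestBody`, `userId`, and the
* `openai` client are assumed to exist in scope:
* ```typescript
* const builder = helicone.logBuilder(requestBody, { "Helicone-User-Id": userId });
* try {
*   const response = await openai.chat.completions.create({ ...requestBody, stream: true });
*   return builder.toReadableStream(response);
* } catch (error) {
*   builder.setError(error);
*   throw error;
* } finally {
*   await builder.sendLog();
* }
* ```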
*/
declare class HeliconeLogBuilder {
private logger;
private request;
private additionalHeaders?;
private startTime;
private endTime;
private responseBody;
private error;
private timeToFirstToken?;
private streamTexts;
private status;
private wasCancelled;
private streamState;
private attachedStream;
/**
* Creates a new HeliconeLogBuilder
* @param logger - The HeliconeManualLogger instance to use for logging
* @param request - The request object to log
* @param additionalHeaders - Additional headers to send with the request
*/
constructor(logger: HeliconeManualLogger, request: HeliconeLogRequest, additionalHeaders?: Record<string, string>);
/**
* Sets an error that occurred during the request
* @param error - The error that occurred
*/
setError(error: any): void;
/**
* Collects streaming responses and converts them to a readable stream
* while also capturing the response for logging
* @param stream - The stream from an LLM provider response
* @returns A ReadableStream that can be returned to the client
*/
toReadableStream<T>(stream: Stream<T>): ReadableStream;
addAdditionalHeaders(headers: Record<string, string>): void;
/**
* Attaches a stream to the log builder; the stream will be consumed and its contents logged when sendLog is called
* @param stream - The stream to attach
*/
attachStream<T>(stream: Stream<T>): Promise<void>;
/**
* Sets the response body for non-streaming responses
* @param body - The response body
*/
setResponse(body: string): void;
private waitForStreamToFinish;
private consumeStream;
/**
* Sends the log to Helicone
* @returns A Promise that resolves when logging is complete
*/
sendLog(): Promise<void>;
}
declare class HeliconeManualLogger {
private apiKey;
private headers;
private LOGGING_ENDPOINT;
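/**
* Creates a new HeliconeManualLogger
* @param opts - Your Helicone API key, optional default headers, and an optional custom logging endpoint
*
* @example
* A minimal setup sketch (assumes HELICONE_API_KEY is set in the environment):
* ```typescript
* const helicone = new HeliconeManualLogger({
*   apiKey: process.env.HELICONE_API_KEY!,
* });
* ```
*/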
constructor(opts: IHeliconeManualLogger);
/**
* Creates a log builder for more flexible stream handling with error management
* @param request - The request object to log
* @param additionalHeaders - Additional headers to send with the request
* @returns A HeliconeLogBuilder instance
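*
* @example
* A sketch of the non-streaming path; `requestBody` and `result` are assumed to exist in scope:
* ```typescript
* const builder = helicone.logBuilder(requestBody);
* builder.setResponse(JSON.stringify(result));
* await builder.sendLog();
* ```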
*/
logBuilder(request: HeliconeLogRequest, additionalHeaders?: Record<string, string>): HeliconeLogBuilder;
/**
* Logs a custom request to Helicone
* @param request - The request object to log
* @param operation - The operation which will be executed and logged
* @param additionalHeaders - Additional headers to send with the request
* @returns The result of the `operation` function
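*
* @example
* A sketch that records a chat completion result; the `openai` client and `requestBody` are assumed:
* ```typescript
* const completion = await helicone.logRequest(
*   requestBody,
*   async (resultRecorder) => {
*     const result = await openai.chat.completions.create(requestBody);
*     resultRecorder.appendResults({ ...result });
*     return result;
*   },
*   { "Helicone-User-Id": userId }
* );
* ```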
*/
logRequest<T>(request: HeliconeLogRequest, operation: (resultRecorder: HeliconeResultRecorder) => Promise<T>, additionalHeaders?: Record<string, string>): Promise<T>;
/**
* Logs a single stream to Helicone
* @param request - The request object to log
* @param stream - The ReadableStream to consume and log
* @param additionalHeaders - Additional headers to send with the request
* @returns A Promise that resolves when logging is complete
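*
* @example
* A sketch that tees the provider stream so one copy is logged while the other is consumed;
* the `openai` client is assumed:
* ```typescript
* const response = await openai.chat.completions.create({ ...requestBody, stream: true });
* const [stream1, stream2] = response.tee();
* helicone.logSingleStream(requestBody, stream2.toReadableStream());
* // consume stream1 in your application
* ```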
*/
logSingleStream(request: HeliconeLogRequest, stream: ReadableStream, additionalHeaders?: Record<string, string>): Promise<void>;
/**
* Logs a single request with a response body to Helicone
* @param request - The request object to log
* @param body - The response body as a string
* @param options - Logging options
* @param options.additionalHeaders - Additional headers to send with the request
* @param options.latencyMs - The latency of the request in milliseconds
* @returns A Promise that resolves when logging is complete
*
* @example
* ```typescript
* helicone.logSingleRequest(request, body, { additionalHeaders: { "Helicone-User-Id": userId }, latencyMs: 1000 });
* ```
*/
logSingleRequest(request: HeliconeLogRequest, body: string, options: {
additionalHeaders?: Record<string, string>;
latencyMs?: number;
}): Promise<void>;
/**
* Logs a streaming operation to Helicone
* @param request - The request object to log
* @param operation - The operation which will be executed and logged, with access to a stream recorder
* @param additionalHeaders - Additional headers to send with the request
* @returns The result of the `operation` function
*
* @example
* ```typescript
* const response = await llmProvider.createChatCompletion({ stream: true, ... });
* const [stream1, stream2] = response.tee();
*
* helicone.logStream(
* requestBody,
* async (resultRecorder) => {
* resultRecorder.attachStream(stream2.toReadableStream());
* return stream1;
* },
* { "Helicone-User-Id": userId }
* );
* ```
*/
logStream<T>(request: HeliconeLogRequest, operation: (resultRecorder: HeliconeStreamResultRecorder) => Promise<T>, additionalHeaders?: Record<string, string>): Promise<T>;
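/**
* Sends a fully-formed log to Helicone directly
* @param request - The request object to log
* @param response - The response, either as a parsed object or a raw string
* @param options - Timing metadata (start/end timestamps), plus optional additional headers, time to first token, and status code
* @returns A Promise that resolves when logging is complete
*
* @example
* A sketch of a direct call; `callProvider` is a hypothetical stand-in for your own
* provider request, and timestamps are assumed to be `Date.now()`-style milliseconds:
* ```typescript
* const startTime = Date.now();
* const result = await callProvider(requestBody); // hypothetical helper
* await helicone.sendLog(requestBody, { ...result }, {
*   startTime,
*   endTime: Date.now(),
*   status: 200,
* });
* ```
*/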
sendLog(request: HeliconeLogRequest, response: Record<string, any> | string, options: {
startTime: number;
endTime: number;
additionalHeaders?: Record<string, string>;
timeToFirstToken?: number;
status?: number;
}): Promise<void>;
}
/**
* Recorder for handling and processing streams in Helicone logging
* Used to capture and process streaming responses from LLM providers
*/
declare class HeliconeStreamResultRecorder {
private streams;
firstChunkTimeUnix: number | null;
constructor();
/**
* Attaches a ReadableStream to be processed
* @param stream - The ReadableStream to attach
*/
attachStream(stream: ReadableStream): void;
/**
* Processes all attached streams and returns their contents as strings
* @returns Promise resolving to an array of strings containing the content of each stream
*/
getStreamTexts(): Promise<string[]>;
}
/**
* Recorder for handling and storing results in Helicone logging
* Used to capture non-streaming responses from operations
*/
declare class HeliconeResultRecorder {
private results;
/**
* Appends data to the results object
* @param data - The data to append to the results
*/
appendResults(data: Record<string, any>): void;
/**
* Gets the current results
* @returns The current results object
*/
getResults(): Record<string, any>;
}
interface HeliconePromptManagerOptions {
apiKey: string;
baseUrl?: string;
}
declare class HeliconePromptManager {
private apiKey;
private baseUrl;
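/**
* Creates a new HeliconePromptManager
* @param options - Your Helicone API key and an optional base URL override
*
* @example
* ```typescript
* const promptManager = new HeliconePromptManager({
*   apiKey: process.env.HELICONE_API_KEY!,
* });
* ```
*/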
constructor(options: HeliconePromptManagerOptions);
/**
* Pulls a prompt body from Helicone storage by prompt ID and optional version ID
* @param promptId - The unique identifier of the prompt
* @param versionId - Optional version ID; if not provided, the production version is used
* @returns The raw prompt body from storage
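*
* @example
* A sketch pulling the production version of a prompt (the ID is a placeholder):
* ```typescript
* const promptBody = await promptManager.pullPromptBody("XXXXXX");
* ```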
*/
pullPromptBody(promptId: string, versionId?: string): Promise<ChatCompletionCreateParams>;
/**
* Retrieves and merges prompt body with input parameters and variable substitution
* @param params - The chat completion parameters containing prompt_id, optional version_id, inputs, and other OpenAI parameters
* @returns Object containing the compiled prompt body and any validation/substitution errors
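*
* @example
* A sketch that compiles a prompt and forwards the body to an OpenAI client (`openai` is assumed):
* ```typescript
* const { body, errors } = await promptManager.getPromptBody({
*   prompt_id: "XXXXXX",
*   model: "gpt-4",
*   messages: [{ role: "user", content: "Hello!" }],
*   inputs: { name: "John" },
* } as HeliconeChatCreateParams);
* if (errors.length > 0) {
*   console.warn("Prompt compilation errors:", errors);
* }
* const response = await openai.chat.completions.create(body);
* ```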
*/
getPromptBody(params: HeliconeChatCreateParams | HeliconeChatCreateParamsStreaming): Promise<{
body: ChatCompletionCreateParams;
errors: ValidationError[];
}>;
private getPromptVersion;
private getProductionVersion;
private fetchPromptBodyFromS3;
}
/**
* Parameters for using Helicone prompt templates.
*
* @example
* ```typescript
* const promptParams = {
* prompt_id: "XXXXXX",
* version_id: "5d4ec7d7-5725-46c2-ad26-41ddf6287527", // optional
* inputs: {
* name: "John",
* age: 20,
* }
* };
* ```
*/
type HeliconePromptParams = {
/** The unique identifier for your Helicone prompt template */
prompt_id: string;
/** Optional version ID. If not provided, uses the production version */
version_id?: string;
/**
* Key-value pairs to interpolate into your prompt template.
* Keys should match the variable names in your template.
*/
inputs?: Record<string, any>;
};
/**
* OpenAI ChatCompletion parameters extended with Helicone prompt template support.
* Use this type when creating non-streaming chat completions with Helicone prompts.
*
* @example
* ```typescript
* const response = await openai.chat.completions.create({
* prompt_id: "XXXXXX",
* model: "gpt-4",
* messages: [{ role: "user", content: "Hello!" }],
* inputs: {
* name: "John",
* age: 20,
* }
* } as HeliconeChatCreateParams);
* ```
*/
type HeliconeChatCreateParams = ChatCompletionCreateParamsNonStreaming & HeliconePromptParams;
/**
* OpenAI ChatCompletion parameters extended with Helicone prompt template support for streaming responses.
* Use this type when creating streaming chat completions with Helicone prompts.
*
* @example
* ```typescript
* const stream = await openai.chat.completions.create({
* prompt_id: "XXXXXX",
* model: "gpt-4",
* messages: [{ role: "user", content: "Hello!" }],
* stream: true,
* inputs: {
* name: "John",
* age: 20,
* }
* } as HeliconeChatCreateParamsStreaming);
* ```
*/
type HeliconeChatCreateParamsStreaming = ChatCompletionCreateParamsStreaming & HeliconePromptParams;
export { type HeliconeChatCreateParams, type HeliconeChatCreateParamsStreaming, HeliconeLogBuilder, HeliconeManualLogger, HeliconePromptManager, type HeliconePromptParams };