/**
 * @posthog/ai — PostHog Node.js AI integrations (type declarations).
 */
import OpenAIOrignal, { OpenAI, APIPromise, ClientOptions as ClientOptions$1, AzureOpenAI } from 'openai';
import { PostHog } from 'posthog-node';
import { Stream } from 'openai/streaming';
import { ParsedResponse } from 'openai/resources/responses/responses';
import { ResponseCreateParamsWithTools, ExtractParsedContentFromParams } from 'openai/lib/ResponsesParser';
import { LanguageModelV2, LanguageModelV3 } from '@ai-sdk/provider';
import { ReadableSpan, SpanProcessor } from '@opentelemetry/sdk-trace-base';
import { Span, Context } from '@opentelemetry/api';
import AnthropicOriginal, { APIPromise as APIPromise$1 } from '@anthropic-ai/sdk';
import { Stream as Stream$1 } from '@anthropic-ai/sdk/streaming';
import { GoogleGenAI, GenerateContentParameters, GenerateContentResponse, GoogleGenAIOptions } from '@google/genai';
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
import { Serialized } from '@langchain/core/load/serializable';
import { ChainValues } from '@langchain/core/utils/types';
import { LLMResult } from '@langchain/core/outputs';
import { AgentAction, AgentFinish } from '@langchain/core/agents';
import { DocumentInterface } from '@langchain/core/documents';
import { BaseMessage } from '@langchain/core/messages';
/**
 * Options for fetching a prompt
 */
interface GetPromptOptions {
/** Cache TTL for this fetch, in seconds. */
cacheTtlSeconds?: number;
/** Returned instead of throwing when the prompt cannot be fetched (see `Prompts.get`). */
fallback?: string;
}
/**
 * Variables for prompt compilation
 *
 * Maps `{{variableName}}` placeholders to substitution values
 * (see `Prompts.compile`).
 */
type PromptVariables = Record<string, string | number | boolean>;
/**
 * Direct options for initializing Prompts without a PostHog client
 */
interface PromptsDirectOptions {
/** Personal API key (`phx_...`) used to authenticate against the prompts API. */
personalApiKey: string;
/** Project API key (`phc_...`) identifying the project whose prompts are fetched. */
projectApiKey: string;
/** PostHog host, e.g. `https://us.posthog.com`. Presumably defaults to PostHog Cloud — confirm in implementation. */
host?: string;
/** Default cache TTL (seconds) used when `GetPromptOptions.cacheTtlSeconds` is omitted. */
defaultCacheTtlSeconds?: number;
}
/**
 * Monitoring properties attached to captured AI events, with the fields that
 * have defaults (`traceId`, `privacyMode`) spelled out as required. Callers
 * normally use the all-optional `MonitoringEventProperties` form instead.
 */
interface MonitoringEventPropertiesWithDefaults {
/** Person to attribute the event to. */
distinctId?: string;
/** Trace identifier grouping related generations/spans. */
traceId: string;
/** Extra event properties merged into the capture. */
properties?: Record<string, any>;
/** When true, request/response content is withheld — NOTE(review): exact redaction behavior lives in the implementation; confirm there. */
privacyMode: boolean;
/** PostHog group analytics mapping. */
groups?: Record<string, any>;
/** Overrides the model name reported on the event. */
modelOverride?: string;
/** Overrides the provider name reported on the event. */
providerOverride?: string;
/** Overrides the computed cost (see `CostOverride`). */
costOverride?: CostOverride;
/** Capture the event immediately rather than batching — presumably bypasses the client queue; confirm in implementation. */
captureImmediate?: boolean;
}
/** All-optional monitoring properties as accepted by the wrapped clients. */
type MonitoringEventProperties = Partial<MonitoringEventPropertiesWithDefaults>;
/**
 * `MonitoringEventProperties` re-keyed with a `posthog` prefix
 * (e.g. `traceId` -> `posthogTraceId`) so they can be mixed into provider
 * request bodies without clashing with provider parameters.
 */
type MonitoringParams = {
[K in keyof MonitoringEventProperties as `posthog${Capitalize<string & K>}`]: MonitoringEventProperties[K];
};
/** Manual cost override for a generation. Units are presumably USD — confirm against implementation. */
interface CostOverride {
/** Cost attributed to input tokens. */
inputCost: number;
/** Cost attributed to output tokens. */
outputCost: number;
}
/** PostHog event names emitted for AI calls. */
declare enum AIEvent {
Generation = "$ai_generation",
Embedding = "$ai_embedding"
}
// Re-exported OpenAI namespace classes, used as base classes by the wrappers below.
declare const Chat: typeof OpenAI.Chat;
declare const Completions: typeof OpenAI.Chat.Completions;
declare const Responses: typeof OpenAI.Responses;
declare const Embeddings: typeof OpenAI.Embeddings;
declare const Audio: typeof OpenAI.Audio;
declare const Transcriptions: typeof OpenAI.Audio.Transcriptions;
// Aliases for OpenAI request/response types. The `$1` suffix distinguishes
// them from the same-named aliases declared later for the Azure client.
type ChatCompletion$1 = OpenAI.ChatCompletion;
type ChatCompletionChunk$1 = OpenAI.ChatCompletionChunk;
type ChatCompletionCreateParamsBase$1 = OpenAI.Chat.Completions.ChatCompletionCreateParams;
type ChatCompletionCreateParamsNonStreaming$1 = OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming;
type ChatCompletionCreateParamsStreaming$1 = OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming;
type ResponsesCreateParamsBase = OpenAI.Responses.ResponseCreateParams;
type ResponsesCreateParamsNonStreaming = OpenAI.Responses.ResponseCreateParamsNonStreaming;
type ResponsesCreateParamsStreaming = OpenAI.Responses.ResponseCreateParamsStreaming;
type CreateEmbeddingResponse$1 = OpenAI.CreateEmbeddingResponse;
type EmbeddingCreateParams$1 = OpenAI.EmbeddingCreateParams;
/** Configuration for the monitored OpenAI client. */
interface MonitoringOpenAIConfig$1 extends ClientOptions$1 {
/** OpenAI API key. */
apiKey: string;
/** PostHog client used to capture AI events. */
posthog: PostHog;
/** Optional OpenAI-compatible base URL override. */
baseURL?: string;
}
/** Opaque per-request options passed through to the underlying SDK. */
type RequestOptions$2 = Record<string, unknown>;
/**
 * Drop-in `OpenAI` client whose `chat`, `responses`, `embeddings` and `audio`
 * surfaces are replaced by wrappers that capture PostHog AI events.
 */
declare class PostHogOpenAI extends OpenAI {
private readonly phClient;
chat: WrappedChat$1;
responses: WrappedResponses;
embeddings: WrappedEmbeddings$1;
audio: WrappedAudio;
constructor(config: MonitoringOpenAIConfig$1);
}
/** `OpenAI.Chat` whose `completions` member is the monitored wrapper. */
declare class WrappedChat$1 extends Chat {
constructor(parentClient: PostHogOpenAI, phClient: PostHog);
completions: WrappedCompletions$1;
}
/**
 * `OpenAI.Chat.Completions` whose `create` overloads additionally accept
 * `posthog*` monitoring params; return types mirror the SDK exactly.
 */
declare class WrappedCompletions$1 extends Completions {
private readonly phClient;
private readonly baseURL;
constructor(client: OpenAI, phClient: PostHog);
/** Non-streaming request. */
create(body: ChatCompletionCreateParamsNonStreaming$1 & MonitoringParams, options?: RequestOptions$2): APIPromise<ChatCompletion$1>;
/** Streaming request. */
create(body: ChatCompletionCreateParamsStreaming$1 & MonitoringParams, options?: RequestOptions$2): APIPromise<Stream<ChatCompletionChunk$1>>;
/** Base overload covering both forms. */
create(body: ChatCompletionCreateParamsBase$1 & MonitoringParams, options?: RequestOptions$2): APIPromise<ChatCompletion$1 | Stream<ChatCompletionChunk$1>>;
}
/**
 * `OpenAI.Responses` whose `create`/`parse` additionally accept `posthog*`
 * monitoring params.
 */
declare class WrappedResponses extends Responses {
private readonly phClient;
private readonly baseURL;
constructor(client: OpenAI, phClient: PostHog);
/** Non-streaming request. */
create(body: ResponsesCreateParamsNonStreaming & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Responses.Response>;
/** Streaming request. */
create(body: ResponsesCreateParamsStreaming & MonitoringParams, options?: RequestOptions$2): APIPromise<Stream<OpenAI.Responses.ResponseStreamEvent>>;
/** Base overload covering both forms. */
create(body: ResponsesCreateParamsBase & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Responses.Response | Stream<OpenAI.Responses.ResponseStreamEvent>>;
/** Structured-output request; parsed content type is derived from the supplied tools/format. */
parse<Params extends ResponseCreateParamsWithTools, ParsedT = ExtractParsedContentFromParams<Params>>(body: Params & MonitoringParams, options?: RequestOptions$2): APIPromise<ParsedResponse<ParsedT>>;
}
/** `OpenAI.Embeddings` whose `create` additionally accepts `posthog*` monitoring params. */
declare class WrappedEmbeddings$1 extends Embeddings {
private readonly phClient;
private readonly baseURL;
constructor(client: OpenAI, phClient: PostHog);
create(body: EmbeddingCreateParams$1 & MonitoringParams, options?: RequestOptions$2): APIPromise<CreateEmbeddingResponse$1>;
}
/** `OpenAI.Audio` whose `transcriptions` member is the monitored wrapper. */
declare class WrappedAudio extends Audio {
constructor(parentClient: PostHogOpenAI, phClient: PostHog);
transcriptions: WrappedTranscriptions;
}
/**
 * `OpenAI.Audio.Transcriptions` whose `create` overloads additionally accept
 * `posthog*` monitoring params; per-format return types mirror the SDK.
 */
declare class WrappedTranscriptions extends Transcriptions {
private readonly phClient;
private readonly baseURL;
constructor(client: OpenAI, phClient: PostHog);
/** Non-streaming, default/`json` response format. */
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParamsNonStreaming<'json' | undefined> & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Audio.Transcriptions.Transcription>;
/** Non-streaming, `verbose_json` response format. */
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParamsNonStreaming<'verbose_json'> & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Audio.Transcriptions.TranscriptionVerbose>;
/** Non-streaming, plain-text formats. */
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'> & MonitoringParams, options?: RequestOptions$2): APIPromise<string>;
// Fix: this generic non-streaming overload previously omitted `& MonitoringParams`,
// unlike every other `create` overload in this file, so `posthog*` keys on such
// calls fell through to the broader base overload. All MonitoringParams members
// are optional, so adding the intersection is backward-compatible.
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParamsNonStreaming & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Audio.Transcriptions.Transcription>;
/** Streaming request. */
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParamsStreaming & MonitoringParams, options?: RequestOptions$2): APIPromise<Stream<OpenAI.Audio.Transcriptions.TranscriptionStreamEvent>>;
// NOTE(review): this overload repeats the streaming parameter type above with a
// wider return type; overload resolution picks the earlier one, so it is
// effectively unreachable. Kept to preserve the declared surface.
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParamsStreaming & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Audio.Transcriptions.TranscriptionCreateResponse | string | Stream<OpenAI.Audio.Transcriptions.TranscriptionStreamEvent>>;
/** Base overload covering all forms. */
create(body: OpenAI.Audio.Transcriptions.TranscriptionCreateParams & MonitoringParams, options?: RequestOptions$2): APIPromise<OpenAI.Audio.Transcriptions.TranscriptionCreateResponse | string | Stream<OpenAI.Audio.Transcriptions.TranscriptionStreamEvent>>;
}
// Aliases for the vanilla OpenAI types (note: `OpenAIOrignal` is the import
// alias's spelling), used by the AzureOpenAI wrappers below.
type ChatCompletion = OpenAIOrignal.ChatCompletion;
type ChatCompletionChunk = OpenAIOrignal.ChatCompletionChunk;
type ChatCompletionCreateParamsBase = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParams;
type ChatCompletionCreateParamsNonStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsNonStreaming;
type ChatCompletionCreateParamsStreaming = OpenAIOrignal.Chat.Completions.ChatCompletionCreateParamsStreaming;
type CreateEmbeddingResponse = OpenAIOrignal.CreateEmbeddingResponse;
type EmbeddingCreateParams = OpenAIOrignal.EmbeddingCreateParams;
/** Configuration for the monitored Azure OpenAI client. */
interface MonitoringOpenAIConfig {
/** API key for the Azure OpenAI resource. */
apiKey: string;
/** PostHog client used to capture AI events. */
posthog: PostHog;
/** Optional base URL override. */
baseURL?: string;
}
/** Opaque per-request options passed through to the underlying SDK. */
type RequestOptions$1 = Record<string, any>;
/**
 * Drop-in `AzureOpenAI` client whose `chat` and `embeddings` surfaces are
 * replaced by wrappers that capture PostHog AI events.
 */
declare class PostHogAzureOpenAI extends AzureOpenAI {
private readonly phClient;
chat: WrappedChat;
embeddings: WrappedEmbeddings;
constructor(config: MonitoringOpenAIConfig);
}
/** `AzureOpenAI.Chat` whose `completions` member is the monitored wrapper. */
declare class WrappedChat extends AzureOpenAI.Chat {
constructor(parentClient: PostHogAzureOpenAI, phClient: PostHog);
completions: WrappedCompletions;
}
/**
 * `AzureOpenAI.Chat.Completions` whose `create` overloads additionally accept
 * `posthog*` monitoring params.
 */
declare class WrappedCompletions extends AzureOpenAI.Chat.Completions {
private readonly phClient;
private readonly baseURL;
constructor(client: AzureOpenAI, phClient: PostHog);
/** Non-streaming request. */
create(body: ChatCompletionCreateParamsNonStreaming & MonitoringParams, options?: RequestOptions$1): APIPromise<ChatCompletion>;
/** Streaming request. */
create(body: ChatCompletionCreateParamsStreaming & MonitoringParams, options?: RequestOptions$1): APIPromise<Stream<ChatCompletionChunk>>;
/** Base overload covering both forms. */
create(body: ChatCompletionCreateParamsBase & MonitoringParams, options?: RequestOptions$1): APIPromise<ChatCompletion | Stream<ChatCompletionChunk>>;
}
/** `AzureOpenAI.Embeddings` whose `create` additionally accepts `posthog*` monitoring params. */
declare class WrappedEmbeddings extends AzureOpenAI.Embeddings {
private readonly phClient;
private readonly baseURL;
constructor(client: AzureOpenAI, phClient: PostHog);
create(body: EmbeddingCreateParams & MonitoringParams, options?: RequestOptions$1): APIPromise<CreateEmbeddingResponse>;
}
/** Vercel AI SDK language model, spec V2 or V3. */
type LanguageModel = LanguageModelV2 | LanguageModelV3;
/**
 * PostHog tracing options for `wrapVercelLanguageModel` — the `posthog*`
 * prefixed counterparts of `MonitoringEventProperties`.
 */
interface ClientOptions {
posthogDistinctId?: string;
posthogTraceId?: string;
posthogProperties?: Record<string, any>;
posthogPrivacyMode?: boolean;
posthogGroups?: Record<string, any>;
posthogModelOverride?: string;
posthogProviderOverride?: string;
posthogCostOverride?: CostOverride;
posthogCaptureImmediate?: boolean;
}
/**
 * Wraps a Vercel AI SDK language model (V2 or V3) with PostHog tracing.
 * Automatically detects the model version and applies appropriate instrumentation.
 *
 * @param model - Model to wrap; the returned value has the same type.
 * @param phClient - PostHog client used to capture events.
 * @param options - Tracing options applied to calls through the wrapped model.
 */
declare const wrapVercelLanguageModel: <T extends LanguageModel>(model: T, phClient: PostHog, options: ClientOptions) => T;
/** Raw token-usage payload extracted from a span; shape is provider-specific. */
type UsageData = Record<string, unknown>;
/** Context handed to a span mapper: the telemetry options currently in effect. */
interface PostHogSpanMapperContext {
options: PostHogTelemetryOptions;
}
/** Normalized event data a mapper extracts from an OpenTelemetry span. */
interface PostHogSpanMapperResult {
distinctId?: string;
traceId?: string;
model?: string;
provider: string;
input: any;
output: any;
// Units (ms vs s) are not specified here — confirm against the implementation.
latency: number;
timeToFirstToken?: number;
baseURL?: string;
httpStatus?: number;
/** Which AI event to emit; presumably defaults to `$ai_generation` — confirm in implementation. */
eventType?: AIEvent;
usage?: UsageData;
tools?: any[] | null;
modelParams?: Record<string, unknown>;
posthogProperties?: Record<string, unknown>;
error?: unknown;
}
/** Converts OpenTelemetry spans it recognizes into PostHog AI event data. */
interface PostHogSpanMapper {
/** Human-readable mapper name. */
name: string;
/** Predicate: can this mapper handle the given span? */
canMap: (span: ReadableSpan) => boolean;
/** Extracts event data from the span; returning null skips it. */
map: (span: ReadableSpan, context: PostHogSpanMapperContext) => PostHogSpanMapperResult | null;
}
/** Predicate controlling which spans get exported to PostHog. */
type ShouldExportSpan = (params: {
otelSpan: ReadableSpan;
}) => boolean;
/** Options for span-based telemetry capture (`captureSpan`, `PostHogSpanProcessor`). */
interface PostHogTelemetryOptions {
posthogDistinctId?: string;
posthogTraceId?: string;
posthogProperties?: Record<string, any>;
posthogPrivacyMode?: boolean;
posthogGroups?: Record<string, any>;
posthogModelOverride?: string;
posthogProviderOverride?: string;
posthogCostOverride?: CostOverride;
posthogCaptureImmediate?: boolean;
/** Custom span mappers; relationship to the built-in mappers (replace vs extend) is defined by the implementation. */
mappers?: PostHogSpanMapper[];
/** Filter deciding which spans are exported. */
shouldExportSpan?: ShouldExportSpan;
}
/** Captures a single finished OpenTelemetry span as a PostHog AI event. */
declare function captureSpan(span: ReadableSpan, phClient: PostHog, options?: PostHogTelemetryOptions): Promise<void>;
/**
 * OpenTelemetry `SpanProcessor` that captures finished spans to PostHog.
 * `pendingCaptures` presumably tracks in-flight async captures so that
 * `shutdown`/`forceFlush` can await them — confirm in implementation.
 */
declare class PostHogSpanProcessor implements SpanProcessor {
private readonly phClient;
private readonly options;
private readonly pendingCaptures;
constructor(phClient: PostHog, options?: PostHogTelemetryOptions);
onStart(_span: Span, _parentContext: Context): void;
onEnd(span: ReadableSpan): void;
shutdown(): Promise<void>;
forceFlush(): Promise<void>;
}
/** Convenience factory for `PostHogSpanProcessor`. */
declare function createPostHogSpanProcessor(phClient: PostHog, options?: PostHogTelemetryOptions): SpanProcessor;
// Aliases for Anthropic SDK request/response types used by the wrapper below.
type MessageCreateParamsNonStreaming = AnthropicOriginal.Messages.MessageCreateParamsNonStreaming;
type MessageCreateParamsStreaming = AnthropicOriginal.Messages.MessageCreateParamsStreaming;
type Message = AnthropicOriginal.Messages.Message;
type RawMessageStreamEvent = AnthropicOriginal.Messages.RawMessageStreamEvent;
type MessageCreateParamsBase = AnthropicOriginal.Messages.MessageCreateParams;
type RequestOptions = AnthropicOriginal.RequestOptions;
/** Configuration for the monitored Anthropic client. */
interface MonitoringAnthropicConfig {
/** Anthropic API key. */
apiKey: string;
/** PostHog client used to capture AI events. */
posthog: PostHog;
/** Optional base URL override. */
baseURL?: string;
}
/**
 * Drop-in Anthropic client whose `messages` surface is replaced by a wrapper
 * that captures PostHog AI events.
 */
declare class PostHogAnthropic extends AnthropicOriginal {
private readonly phClient;
messages: WrappedMessages;
constructor(config: MonitoringAnthropicConfig);
}
/**
 * `Anthropic.Messages` whose `create` overloads additionally accept `posthog*`
 * monitoring params; return types mirror the SDK exactly.
 */
declare class WrappedMessages extends AnthropicOriginal.Messages {
private readonly phClient;
private readonly baseURL;
constructor(parentClient: PostHogAnthropic, phClient: PostHog);
// Fix: the non-streaming overload previously omitted `& MonitoringParams`,
// unlike the streaming and base overloads below, so non-streaming calls with
// `posthog*` keys fell through to the base overload and lost the precise
// `Message` return type. All MonitoringParams members are optional, so the
// intersection is backward-compatible.
create(body: MessageCreateParamsNonStreaming & MonitoringParams, options?: RequestOptions): APIPromise$1<Message>;
/** Streaming request. */
create(body: MessageCreateParamsStreaming & MonitoringParams, options?: RequestOptions): APIPromise$1<Stream$1<RawMessageStreamEvent>>;
/** Base overload covering both forms. */
create(body: MessageCreateParamsBase & MonitoringParams, options?: RequestOptions): APIPromise$1<Stream$1<RawMessageStreamEvent> | Message>;
}
/** `GoogleGenAIOptions` plus the PostHog client used to capture AI events. */
interface MonitoringGeminiConfig extends GoogleGenAIOptions {
posthog: PostHog;
}
/**
 * Monitored Google GenAI client. Unlike the OpenAI/Anthropic wrappers this is
 * a facade (composition, not inheritance): it holds the underlying client and
 * exposes a wrapped `models` surface.
 */
declare class PostHogGoogleGenAI {
private readonly phClient;
private readonly client;
models: WrappedModels;
constructor(config: MonitoringGeminiConfig);
}
/**
 * Wrapped `models` surface; generate-content calls additionally accept
 * `posthog*` monitoring params.
 */
declare class WrappedModels {
private readonly phClient;
private readonly client;
constructor(client: GoogleGenAI, phClient: PostHog);
/** One-shot content generation. */
generateContent(params: GenerateContentParameters & MonitoringParams): Promise<GenerateContentResponse>;
/** Streaming generation yielding incremental responses. */
generateContentStream(params: GenerateContentParameters & MonitoringParams): AsyncGenerator<GenerateContentResponse, void, unknown>;
// Implementation-private input/output formatting helpers.
private formatPartsAsContentBlocks;
private formatInput;
private extractSystemInstruction;
private formatInputForPostHog;
}
/**
 * LangChain callback handler that captures chains, LLM/chat-model calls,
 * tools, retrievers and agent steps as PostHog AI events, maintaining a
 * run/parent tree so spans nest under the correct trace.
 */
declare class LangChainCallbackHandler extends BaseCallbackHandler {
name: string;
private client;
private distinctId?;
private traceId?;
private properties;
private privacyMode;
private groups;
private debug;
// Per-run bookkeeping: `runs` holds run metadata, `parentTree` maps runs to parents.
private runs;
private parentTree;
constructor(options: {
client: PostHog;
distinctId?: string | number;
traceId?: string | number;
properties?: Record<string, any>;
privacyMode?: boolean;
groups?: Record<string, any>;
debug?: boolean;
});
// Chain lifecycle callbacks.
handleChainStart(chain: Serialized, inputs: ChainValues, runId: string, parentRunId?: string, tags?: string[], metadata?: Record<string, unknown>, _runType?: string, runName?: string): void;
handleChainEnd(outputs: ChainValues, runId: string, parentRunId?: string, tags?: string[], _kwargs?: {
inputs?: Record<string, unknown>;
}): void;
handleChainError(error: Error, runId: string, parentRunId?: string, tags?: string[], _kwargs?: {
inputs?: Record<string, unknown>;
}): void;
// LLM / chat-model lifecycle callbacks.
handleChatModelStart(serialized: Serialized, messages: BaseMessage[][], runId: string, parentRunId?: string, extraParams?: Record<string, unknown>, tags?: string[], metadata?: Record<string, unknown>, runName?: string): void;
handleLLMStart(serialized: Serialized, prompts: string[], runId: string, parentRunId?: string, extraParams?: Record<string, unknown>, tags?: string[], metadata?: Record<string, unknown>, runName?: string): void;
handleLLMEnd(output: LLMResult, runId: string, parentRunId?: string, tags?: string[], _extraParams?: Record<string, unknown>): void;
handleLLMError(err: Error, runId: string, parentRunId?: string, tags?: string[], _extraParams?: Record<string, unknown>): void;
// Tool lifecycle callbacks.
handleToolStart(tool: Serialized, input: string, runId: string, parentRunId?: string, tags?: string[], metadata?: Record<string, unknown>, runName?: string): void;
handleToolEnd(output: any, runId: string, parentRunId?: string, tags?: string[]): void;
handleToolError(err: Error, runId: string, parentRunId?: string, tags?: string[]): void;
// Retriever lifecycle callbacks.
handleRetrieverStart(retriever: Serialized, query: string, runId: string, parentRunId?: string, tags?: string[], metadata?: Record<string, unknown>, name?: string): void;
handleRetrieverEnd(documents: DocumentInterface[], runId: string, parentRunId?: string, tags?: string[]): void;
handleRetrieverError(err: Error, runId: string, parentRunId?: string, tags?: string[]): void;
// Agent lifecycle callbacks.
handleAgentAction(action: AgentAction, runId: string, parentRunId?: string, tags?: string[]): void;
handleAgentEnd(action: AgentFinish, runId: string, parentRunId?: string, tags?: string[]): void;
// Private helpers: run-tree maintenance, metadata bookkeeping, event capture,
// and LangChain-output normalization (tool calls, raw responses, usage parsing).
private _setParentOfRun;
private _popParentOfRun;
private _findRootRun;
private _setTraceOrSpanMetadata;
private _setLLMMetadata;
private _popRunMetadata;
private _getTraceId;
private _getParentRunId;
private _popRunAndCaptureTraceOrSpan;
private _captureTraceOrSpan;
private _popRunAndCaptureGeneration;
private _captureGeneration;
private _logDebugEvent;
private _getLangchainRunName;
private _convertLcToolCallsToOai;
private _extractRawResponse;
private _convertMessageToDict;
private _parseUsageModel;
private parseUsage;
}
/** Options for initializing Prompts from an existing PostHog client. */
interface PromptsWithPostHogOptions {
/** PostHog client whose credentials/host are reused for prompt fetching. */
posthog: PostHog;
/** Default cache TTL (seconds) used when `GetPromptOptions.cacheTtlSeconds` is omitted. */
defaultCacheTtlSeconds?: number;
}
/** Either an existing PostHog client or direct API credentials. */
type PromptsOptions = PromptsWithPostHogOptions | PromptsDirectOptions;
/**
 * Prompts class for fetching and compiling LLM prompts from PostHog
 *
 * @example
 * ```ts
 * // With PostHog client
 * const prompts = new Prompts({ posthog })
 *
 * // Or with direct options (no PostHog client needed)
 * const prompts = new Prompts({
 *   personalApiKey: 'phx_xxx',
 *   projectApiKey: 'phc_xxx',
 *   host: 'https://us.posthog.com',
 * })
 *
 * // Fetch with caching and fallback
 * const template = await prompts.get('support-system-prompt', {
 *   cacheTtlSeconds: 300,
 *   fallback: 'You are a helpful assistant.',
 * })
 *
 * // Compile with variables
 * const systemPrompt = prompts.compile(template, {
 *   company: 'Acme Corp',
 *   tier: 'premium',
 * })
 * ```
 */
declare class Prompts {
private personalApiKey;
private projectApiKey;
private host;
private defaultCacheTtlSeconds;
// In-memory prompt cache, keyed by prompt name (see clearCache).
private cache;
constructor(options: PromptsOptions);
/**
 * Fetch a prompt by name from the PostHog API
 *
 * @param name - The name of the prompt to fetch
 * @param options - Optional settings for caching and fallback
 * @returns The prompt string
 * @throws Error if the prompt cannot be fetched and no fallback is provided
 */
get(name: string, options?: GetPromptOptions): Promise<string>;
/**
 * Compile a prompt template with variable substitution
 *
 * Variables in the format `{{variableName}}` will be replaced with values from the variables object.
 * Unmatched variables are left unchanged.
 *
 * @param prompt - The prompt template string
 * @param variables - Object containing variable values
 * @returns The compiled prompt string
 */
compile(prompt: string, variables: PromptVariables): string;
/**
 * Clear the cache for a specific prompt or all prompts
 *
 * @param name - Optional prompt name to clear. If not provided, clears all cached prompts.
 */
clearCache(name?: string): void;
// Uncached HTTP fetch used by `get`.
private fetchPromptFromApi;
}
// Public surface: monitored provider clients are exported under the original
// SDK names (e.g. `PostHogOpenAI as OpenAI`) so they can be swapped in-place.
export { PostHogAnthropic as Anthropic, PostHogAzureOpenAI as AzureOpenAI, PostHogGoogleGenAI as GoogleGenAI, LangChainCallbackHandler, PostHogOpenAI as OpenAI, PostHogSpanProcessor, Prompts, captureSpan, createPostHogSpanProcessor, wrapVercelLanguageModel as withTracing };