
@osohq/langchain

Oso observability integration for LangChain agents

/**
 * LangChain callback handler for Oso observability integration.
 *
 * Example usage:
 *   import { OsoObservabilityCallback } from '@osohq/langchain';
 *
 *   const callback = new OsoObservabilityCallback({
 *     authToken: "your-oso-auth-token",
 *     agentId: "my-support-agent"
 *   });
 *
 *   const agent = createAgent({ callbacks: [callback] });
 *   const result = await agent.invoke({ input: "Hello" });
 *   await callback.close();
 */
import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
import type { AgentAction, AgentFinish } from "@langchain/core/agents";
import type { LLMResult } from "@langchain/core/outputs";
import type { Serialized } from "@langchain/core/load/serializable";
interface OsoObservabilityCallbackConfig {
    /** Oso observability endpoint URL (default: OSO_ENDPOINT env var or https://cloud.osohq.com/api/events) */
    endpoint?: string;
    /** Oso authentication token (default: OSO_AUTH_TOKEN env var) */
    authToken?: string;
    /** Whether to send events (default: OSO_OBSERVABILITY_ENABLED env var or true) */
    enabled?: boolean;
    /** Session ID to group related conversations (default: auto-generated UUID) */
    sessionId?: string;
    /** Additional metadata to attach to all events */
    metadata?: Record<string, any>;
    /** Agent identifier for tracking (default: "default-agent") */
    agentId?: string;
}
/**
 * LangChain callback handler that sends agent events to Oso observability.
 *
 * Automatically captures:
 * - LLM calls (model, prompts, responses, token usage)
 * - Tool executions (name, inputs, outputs, duration, errors)
 * - Agent reasoning (decisions, thoughts, intermediate steps)
 * - Chain executions (starts, ends, inputs, outputs)
 * - Errors at any stage
 */
export declare class OsoObservabilityCallback extends BaseCallbackHandler {
    name: string;
    private endpoint;
    private authToken?;
    private enabled;
    private sessionId;
    private metadata;
    private agentId;
    private executionId;
    private executionStartTime;
    private toolStartTimes;
    private llmCalls;
    private toolCalls;
    private agentSteps;
    private errors;
    constructor(config?: OsoObservabilityCallbackConfig);
    handleLLMStart(llm: Serialized, prompts: string[], runId: string, parentRunId?: string, extraParams?: Record<string, any>, tags?: string[], metadata?: Record<string, any>): Promise<void>;
    handleLLMEnd(output: LLMResult, runId: string, parentRunId?: string): Promise<void>;
    handleLLMError(error: Error, runId: string, parentRunId?: string): Promise<void>;
    handleToolStart(tool: Serialized, input: string, runId: string, parentRunId?: string, tags?: string[], metadata?: Record<string, any>): Promise<void>;
    handleToolEnd(output: string, runId: string, parentRunId?: string): Promise<void>;
    handleToolError(error: Error, runId: string, parentRunId?: string): Promise<void>;
    handleAgentAction(action: AgentAction, runId: string, parentRunId?: string): Promise<void>;
    handleAgentEnd(action: AgentFinish, runId: string, parentRunId?: string): Promise<void>;
    handleChainStart(chain: Serialized, inputs: Record<string, any>, runId: string, parentRunId?: string, tags?: string[], metadata?: Record<string, any>): Promise<void>;
    handleChainEnd(outputs: Record<string, any>, runId: string, parentRunId?: string): Promise<void>;
    handleChainError(error: Error, runId: string, parentRunId?: string): Promise<void>;
    private sendEvent;
    private sendExecutionSummary;
    /**
     * Clean up resources. Call this when you're done using the callback.
     */
    close(): Promise<void>;
}
export {};
//# sourceMappingURL=callbacks.d.ts.map
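
The declarations above describe the handler's surface but not how it attaches to a running agent (the `createAgent` helper in the doc comment is not part of this package). The sketch below is a minimal, illustrative example that passes the callback through LangChain's standard per-invocation `callbacks` option; the `ChatOpenAI` model, model name, and metadata values are assumptions for demonstration, and any LangChain runnable (chain, agent executor, model) accepts the same option.

// Minimal usage sketch. Assumes @langchain/openai is installed and OPENAI_API_KEY is set;
// ChatOpenAI stands in for whatever model or agent you actually run.
import { ChatOpenAI } from "@langchain/openai";
import { OsoObservabilityCallback } from "@osohq/langchain";

const callback = new OsoObservabilityCallback({
  authToken: process.env.OSO_AUTH_TOKEN, // or omit to use the OSO_AUTH_TOKEN default
  agentId: "my-support-agent",           // identifier attached to this agent's events
  metadata: { environment: "staging" },  // extra fields attached to every event (illustrative)
});

const model = new ChatOpenAI({ model: "gpt-4o-mini" });

// Passing the handler per invocation routes LLM start/end/error events through it;
// agents additionally emit tool, agent, and chain events to the same handler.
const result = await model.invoke("Hello", { callbacks: [callback] });
console.log(result.content);

// Flush any buffered events and release resources once the conversation is done.
await callback.close();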