
agentlang


The easiest way to build reliable AI agents: enterprise-grade teams of AI agents that collaborate with each other and with humans

import { ChatOpenAI } from '@langchain/openai';
import { asAIResponse } from '../provider.js';
import { getLocalEnv } from '../../auth/defs.js';

// LLM provider backed by LangChain's ChatOpenAI client.
export class OpenAIProvider {
    constructor(config) {
        this.config = this.parseConfig(config);
        this.model = this.buildModel();
    }

    // Build a ChatOpenAI instance from the parsed config. apiKey and
    // configuration are only passed through when present, so ChatOpenAI
    // can fall back to its own environment-based defaults.
    buildModel() {
        const chatConfig = {
            model: this.config.model,
            temperature: this.config.temperature,
            maxTokens: this.config.maxTokens,
            topP: this.config.topP,
            frequencyPenalty: this.config.frequencyPenalty,
            presencePenalty: this.config.presencePenalty,
            maxRetries: this.config.maxRetries,
            streamUsage: this.config.streamUsage,
            logprobs: this.config.logprobs,
            topLogprobs: this.config.topLogprobs,
        };
        if (this.config.apiKey) {
            chatConfig.apiKey = this.config.apiKey;
        }
        if (this.config.configuration) {
            chatConfig.configuration = this.config.configuration;
        }
        return new ChatOpenAI(chatConfig);
    }

    // Normalize a Map-like config (anything exposing get(key)) into a plain
    // settings object. Both camelCase and snake_case keys are accepted; the
    // API key is resolved from the config, then the process environment,
    // then the local env store.
    parseConfig(config) {
        const defaultConfig = {
            model: 'gpt-4o',
            temperature: 0.7,
            maxTokens: 4096,
            topP: 1.0,
            frequencyPenalty: 0,
            presencePenalty: 0,
            maxRetries: 2,
            streamUsage: true,
            logprobs: false,
        };
        if (!config) {
            return {
                ...defaultConfig,
                apiKey: process.env.OPENAI_API_KEY || getLocalEnv('OPENAI_API_KEY'),
            };
        }
        const apiKey = config.get('apiKey')
            || config.get('api_key')
            || process.env.OPENAI_API_KEY
            || getLocalEnv('OPENAI_API_KEY');
        return {
            model: config.get('model') || defaultConfig.model,
            // ?? (not ||) so an explicit temperature of 0 is respected.
            temperature: config.get('temperature') ?? defaultConfig.temperature,
            maxTokens: config.get('maxTokens') || config.get('max_tokens') || defaultConfig.maxTokens,
            topP: config.get('topP') || config.get('top_p') || defaultConfig.topP,
            frequencyPenalty: config.get('frequencyPenalty') || config.get('frequency_penalty') || defaultConfig.frequencyPenalty,
            presencePenalty: config.get('presencePenalty') || config.get('presence_penalty') || defaultConfig.presencePenalty,
            maxRetries: config.get('maxRetries') || config.get('max_retries') || defaultConfig.maxRetries,
            // Note: because this uses ||, an explicit streamUsage: false
            // still falls back to the default (true).
            streamUsage: config.get('streamUsage') || config.get('stream_usage') || defaultConfig.streamUsage,
            logprobs: config.get('logprobs') ?? defaultConfig.logprobs,
            topLogprobs: config.get('topLogprobs') || config.get('top_logprobs'),
            apiKey,
            configuration: config.get('configuration'),
        };
    }

    // Send messages to the model; if tool specs are supplied, bind them to
    // the model first so the response can contain tool calls.
    async invoke(messages, externalToolSpecs) {
        if (!this.config.apiKey) {
            throw new Error('OpenAI API key is required. Set OPENAI_API_KEY environment variable or use setLocalEnv("OPENAI_API_KEY", key) or provide apiKey in config.');
        }
        if (externalToolSpecs) {
            const boundModel = this.model.bindTools(externalToolSpecs);
            const response = await boundModel.invoke(messages);
            return asAIResponse(response);
        }
        return asAIResponse(await this.model.invoke(messages));
    }

    // Return a shallow copy so callers cannot mutate internal state.
    getConfig() {
        return { ...this.config };
    }

    // Merge new settings over the current ones and rebuild the client.
    updateConfig(newConfig) {
        this.config = { ...this.config, ...newConfig };
        this.model = this.buildModel();
    }
}
//# sourceMappingURL=openai.js.map
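
For orientation, here is a minimal, hypothetical usage sketch. The import path for OpenAIProvider is illustrative only (this file does not reveal where the package exports it); everything else relies on what the code above shows: any config object exposing get(key), such as a plain Map, and LangChain message objects for invoke.

// Hypothetical import path; the real export location inside agentlang may differ.
import { OpenAIProvider } from 'agentlang/dist/llm/providers/openai.js';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';

// A plain Map satisfies the config's get(key) interface.
const provider = new OpenAIProvider(new Map([
    ['model', 'gpt-4o-mini'],
    ['temperature', 0],    // honored: temperature is read with ??
    ['max_tokens', 512],   // snake_case keys are accepted too
]));

// Requires an ES module context (top-level await) and OPENAI_API_KEY
// set in the environment if no apiKey was provided in the config.
const response = await provider.invoke([
    new SystemMessage('You are a terse assistant.'),
    new HumanMessage('Summarize what OpenAIProvider does in one sentence.'),
]);
console.log(response);

Because most fields are read with ||, only temperature and logprobs can be explicitly set to falsy values; the remaining fields treat falsy values as unset and take their defaults.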