@jackhua/mini-langchain
A lightweight TypeScript implementation of LangChain with cost optimization features
openai.js (compiled JavaScript)
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAI = void 0;
exports.createOpenAIFromEnv = createOpenAIFromEnv;
const axios_1 = __importDefault(require("axios"));
const base_1 = require("./base");
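/**
 * Chat model backed by the OpenAI Chat Completions API.
 * Requests go through a preconfigured axios client; streaming responses
 * are consumed as server-sent events (SSE).
 */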
class OpenAI extends base_1.BaseChatLLM {
constructor(config) {
super();
this.apiKey = config.apiKey;
this.model = config.model || 'gpt-3.5-turbo';
this.organization = config.organization;
if (config.defaultTemperature !== undefined) {
this.defaultTemperature = config.defaultTemperature;
}
if (config.defaultMaxTokens !== undefined) {
this.defaultMaxTokens = config.defaultMaxTokens;
}
this.client = axios_1.default.create({
baseURL: config.baseURL || 'https://api.openai.com/v1',
headers: {
'Authorization': `Bearer ${this.apiKey}`,
'Content-Type': 'application/json',
...(this.organization && { 'OpenAI-Organization': this.organization })
}
});
}
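    /**
     * Map internal message types (system/human/ai/function) to the role
     * names the Chat Completions API expects.
     */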
formatMessages(messages) {
return messages.map(msg => {
switch (msg.type) {
case 'system':
return { role: 'system', content: msg.content };
case 'human':
return { role: 'user', content: msg.content };
case 'ai':
return { role: 'assistant', content: msg.content };
case 'function':
return {
role: 'function',
content: msg.content,
name: msg.name
};
default:
throw new Error(`Unknown message type: ${msg.type}`);
}
});
}
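    /**
     * Send a non-streaming chat completion request, firing the LLM
     * start/end/error callbacks around the call, and wrap the first
     * choice in a generation result.
     */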
async generate(messages, options) {
const mergedOptions = this.mergeOptions(options);
const formattedMessages = this.formatMessages(messages);
        // Notify callback handlers that the LLM call is starting.
        const prompts = messages.map(m => m.content);
        await this.handleLLMStart(prompts);
try {
const response = await this.client.post('/chat/completions', {
model: this.model,
messages: formattedMessages,
temperature: mergedOptions.temperature,
max_tokens: mergedOptions.maxTokens,
top_p: mergedOptions.topP,
frequency_penalty: mergedOptions.frequencyPenalty,
presence_penalty: mergedOptions.presencePenalty,
stop: mergedOptions.stop
}, {
timeout: mergedOptions.timeout,
signal: mergedOptions.signal
});
const choice = response.data.choices[0];
            const result = {
                // content is null when the model returns a function_call,
                // so fall back to an empty string.
                text: choice.message.content ?? '',
                message: {
                    type: 'ai',
                    content: choice.message.content ?? '',
                    additionalKwargs: {
                        function_call: choice.message.function_call
                    }
                },
llmOutput: {
tokenUsage: response.data.usage,
model: response.data.model
}
};
await this.handleLLMEnd(result);
return result;
}
catch (error) {
const err = error;
await this.handleLLMError(err);
// Provide more detailed error information
if (error.response) {
const errorData = error.response.data;
console.error('\n🔴 OpenAI API Error:', {
status: error.response.status,
type: errorData.error?.type,
message: errorData.error?.message,
code: errorData.error?.code
});
if (error.response.status === 429) {
console.error('\n💡 Rate limit exceeded. Please check your API key quota at https://platform.openai.com/usage');
}
else if (error.response.status === 401) {
                    console.error('\n💡 Invalid API key. Please check the OPENAI_API_KEY value in your .env file');
}
}
throw err;
}
}
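    /**
     * Stream a chat completion as an async generator of text deltas,
     * parsing the response body as an SSE byte stream.
     */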
async *stream(messages, options) {
const mergedOptions = this.mergeOptions(options);
const formattedMessages = this.formatMessages(messages);
try {
const response = await this.client.post('/chat/completions', {
model: this.model,
messages: formattedMessages,
temperature: mergedOptions.temperature,
max_tokens: mergedOptions.maxTokens,
top_p: mergedOptions.topP,
frequency_penalty: mergedOptions.frequencyPenalty,
presence_penalty: mergedOptions.presencePenalty,
stop: mergedOptions.stop,
stream: true
}, {
responseType: 'stream',
timeout: mergedOptions.timeout,
signal: mergedOptions.signal
});
const stream = response.data;
let buffer = '';
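            // SSE frames can be split across network chunks, so accumulate
            // bytes in a buffer, split on newlines, and carry the trailing
            // partial line over to the next chunk.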
for await (const chunk of stream) {
buffer += chunk.toString();
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
return;
}
try {
const parsed = JSON.parse(data);
const delta = parsed.choices[0]?.delta;
if (delta?.content) {
yield {
text: delta.content,
generationInfo: {
finishReason: parsed.choices[0]?.finish_reason
}
};
}
}
                        catch (e) {
                            // Log and skip frames that are not valid JSON.
                            console.error('Failed to parse SSE data:', e);
                        }
}
}
}
}
catch (error) {
const err = error;
await this.handleLLMError(err);
throw err;
}
}
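    /** Parameters that identify this particular model configuration. */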
get identifyingParams() {
return {
model: this.model,
temperature: this.defaultTemperature,
maxTokens: this.defaultMaxTokens
};
}
get llmType() {
return 'openai';
}
}
exports.OpenAI = OpenAI;
/**
 * Helper to create an OpenAI instance from environment variables.
 */
function createOpenAIFromEnv(config) {
    const apiKey = config?.apiKey || process.env.OPENAI_API_KEY;
    if (!apiKey) {
        throw new Error('OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.');
    }
    // Spread config first so the resolved apiKey and model cannot be
    // clobbered by undefined fields on a partially-filled config object.
    return new OpenAI({
        ...config,
        apiKey,
        model: config?.model || process.env.OPENAI_MODEL || 'gpt-3.5-turbo'
    });
}
//# sourceMappingURL=openai.js.map
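
For reference, a minimal usage sketch. It assumes the package's root export exposes createOpenAIFromEnv (the import path is an assumption); the message objects use the { type, content } shape that formatMessages expects.

"use strict";
const { createOpenAIFromEnv } = require('@jackhua/mini-langchain');

async function main() {
    // Reads OPENAI_API_KEY (and optionally OPENAI_MODEL) from the environment.
    const llm = createOpenAIFromEnv({ defaultTemperature: 0.2 });

    // One-shot generation.
    const result = await llm.generate([
        { type: 'system', content: 'You are a concise assistant.' },
        { type: 'human', content: 'Say hello in five words.' }
    ]);
    console.log(result.text);

    // Token-by-token streaming.
    for await (const chunk of llm.stream([
        { type: 'human', content: 'Count to three.' }
    ])) {
        process.stdout.write(chunk.text);
    }
}

main().catch(console.error);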