@unified-llm/core
Unified LLM interface (in-memory).
import OpenAI from 'openai';
import { validateChatRequest } from '../../utils/validation';
import BaseProvider from '../base-provider';
import { ResponseFormat } from '../../response-format';
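// Example usage (a minimal sketch; the request and response shapes are the
// unified types defined elsewhere in @unified-llm/core and are assumed here):
//
//   const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY, model: 'gpt-4o' });
//   const response = await provider.chat({ messages: [{ role: 'user', content: 'Hello!' }] });
//   console.log(response.text);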
export class OpenAIProvider extends BaseProvider {
constructor({ apiKey, model, tools, options }) {
super({ model, tools });
this.apiKey = apiKey;
this.useResponsesAPI = options?.useResponsesAPI || false;
this.client = new OpenAI({ apiKey });
}
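/**
 * Send a chat request and return a unified response. Dispatches to the
 * Responses API or to Chat Completions based on the useResponsesAPI option.
 */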
async chat(request) {
validateChatRequest(request);
try {
if (this.useResponsesAPI) {
// Await here so rejections are caught and normalized by handleError below
return await this.chatWithResponsesAPI(request);
}
else {
return await this.chatWithChatCompletions(request);
}
}
catch (error) {
throw this.handleError(error);
}
}
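/**
 * Chat via the Chat Completions API. While the model finishes with
 * 'tool_calls' and local tools are registered, each requested tool is
 * executed and its result is appended to the conversation before
 * re-sending the request.
 */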
async chatWithChatCompletions(request) {
if (!request.model && !this.model) {
throw new Error('Model is required for OpenAI chat completions');
}
const openAIRequest = this.convertToOpenAIFormat(request);
let response = await this.client.chat.completions.create(openAIRequest);
let messages = [...openAIRequest.messages];
// If the response requests tool calls, execute them and send the results back
while (response.choices[0].finish_reason === 'tool_calls' && this.tools) {
const toolCalls = response.choices[0].message.tool_calls;
const toolResults = [];
if (toolCalls) {
for (const toolCall of toolCalls) {
if (toolCall.type === 'function') {
const customFunction = this.tools.find(func => func.function.name === toolCall.function.name);
if (customFunction) {
try {
// Merge the CustomFunction's default args with the arguments from the tool call
const mergedArgs = {
...(customFunction.args || {}),
...JSON.parse(toolCall.function.arguments)
};
const result = await customFunction.handler(mergedArgs);
toolResults.push({
role: 'tool',
content: typeof result === 'string' ? result : JSON.stringify(result),
tool_call_id: toolCall.id,
});
}
catch (error) {
toolResults.push({
role: 'tool',
content: error instanceof Error ? error.message : 'Unknown error',
tool_call_id: toolCall.id,
});
}
}
}
}
}
// Re-send the request with the tool results appended
if (toolResults.length > 0) {
messages = [
...messages,
response.choices[0].message,
...toolResults,
];
const followUpRequest = {
...openAIRequest,
messages,
};
response = await this.client.chat.completions.create(followUpRequest);
}
else {
break;
}
}
return this.convertFromOpenAIFormat(response);
}
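/**
 * Chat via the beta Responses API through a raw fetch call, since the
 * installed OpenAI SDK does not expose this endpoint.
 */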
async chatWithResponsesAPI(request) {
// NOTE: Responses API is not yet available in the OpenAI SDK
// This implementation uses the raw HTTP client to call the new API
const responsesRequest = this.convertToResponsesAPIFormat(request);
// Make raw HTTP request to the Responses API
const response = await fetch('https://api.openai.com/v1/responses', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
'OpenAI-Beta': 'responses-v1',
},
body: JSON.stringify(responsesRequest),
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.error?.message || 'API request failed');
}
const data = await response.json();
return this.convertFromResponsesAPIFormat(data);
}
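/**
 * Stream a chat response as an async generator of unified chunks.
 */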
async *stream(request) {
validateChatRequest(request);
if (this.useResponsesAPI) {
yield* this.streamWithResponsesAPI(request);
}
else {
yield* this.streamWithChatCompletions(request);
}
}
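/**
 * Stream via Chat Completions. Chunks are yielded as they arrive until a
 * tool call is detected; from then on chunks are buffered while tool-call
 * deltas are accumulated. Registered tools are executed when the stream
 * finishes with 'tool_calls', and the loop restarts with the results
 * appended to the conversation.
 */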
async *streamWithChatCompletions(request) {
const openAIRequest = this.convertToOpenAIFormat(request);
let messages = [...openAIRequest.messages];
// Keep trying to get a response until we don't get tool calls
while (true) {
const stream = await this.client.chat.completions.create({
...openAIRequest,
messages,
stream: true,
});
// Accumulate tool calls across chunks
const toolCallAccumulator = new Map();
let finishReason = null;
const assistantMessage = { role: 'assistant', content: null };
let fullContent = '';
const bufferedChunks = [];
let hasToolCalls = false;
for await (const chunk of stream) {
// Handle tool calls in the delta
if (chunk.choices[0].delta.tool_calls) {
hasToolCalls = true;
for (const toolCallDelta of chunk.choices[0].delta.tool_calls) {
const index = toolCallDelta.index;
if (!toolCallAccumulator.has(index)) {
// Initialize new tool call
toolCallAccumulator.set(index, {
id: toolCallDelta.id || '',
type: toolCallDelta.type || 'function',
function: {
name: toolCallDelta.function?.name || '',
arguments: toolCallDelta.function?.arguments || '',
}
});
}
else {
// Accumulate arguments for existing tool call
const existing = toolCallAccumulator.get(index);
if (!existing)
continue;
if (toolCallDelta.id)
existing.id = toolCallDelta.id;
if (toolCallDelta.function?.name)
existing.function.name = toolCallDelta.function.name;
if (toolCallDelta.function?.arguments)
existing.function.arguments += toolCallDelta.function.arguments;
}
}
}
// If we detect tool calls, start buffering. Otherwise, yield immediately.
if (hasToolCalls) {
bufferedChunks.push(chunk);
// Accumulate text content for tool call processing
if (chunk.choices[0].delta.content) {
fullContent += chunk.choices[0].delta.content;
}
}
else {
// No tool calls detected yet, yield chunk immediately
yield this.convertStreamChunk(chunk);
}
// Capture finish reason
if (chunk.choices[0].finish_reason) {
finishReason = chunk.choices[0].finish_reason;
}
}
// If we have tool calls and tools are available, execute them
if (finishReason === 'tool_calls' && this.tools && toolCallAccumulator.size > 0) {
// Build the complete assistant message
if (fullContent) {
assistantMessage.content = fullContent;
}
if (toolCallAccumulator.size > 0) {
assistantMessage.tool_calls = Array.from(toolCallAccumulator.values());
}
const toolResults = [];
for (const toolCall of toolCallAccumulator.values()) {
if (toolCall.type === 'function') {
const customFunction = this.tools.find(func => func.function.name === toolCall.function.name);
if (customFunction) {
try {
// Merge default args with function call args
const mergedArgs = {
...(customFunction.args || {}),
...JSON.parse(toolCall.function.arguments)
};
const result = await customFunction.handler(mergedArgs);
toolResults.push({
role: 'tool',
content: typeof result === 'string' ? result : JSON.stringify(result),
tool_call_id: toolCall.id,
});
}
catch (error) {
toolResults.push({
role: 'tool',
content: error instanceof Error ? error.message : 'Unknown error',
tool_call_id: toolCall.id,
});
}
}
}
}
// Continue with tool results if we have any
if (toolResults.length > 0) {
messages = [
...messages,
assistantMessage,
...toolResults,
];
// Continue the loop to get the next response
continue;
}
}
// If we buffered chunks due to tool calls but no tools to execute, yield them now
if (hasToolCalls && bufferedChunks.length > 0) {
for (const chunk of bufferedChunks) {
yield this.convertStreamChunk(chunk);
}
}
break;
}
}
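/**
 * Stream via the beta Responses API, parsing the server-sent event stream
 * returned by a raw fetch call.
 */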
async *streamWithResponsesAPI(request) {
const responsesRequest = this.convertToResponsesAPIFormat(request);
// Make raw HTTP request to the Responses API with streaming
const response = await fetch('https://api.openai.com/v1/responses', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
'Accept': 'text/event-stream',
'OpenAI-Beta': 'responses-v1',
},
body: JSON.stringify({
...responsesRequest,
stream: true,
}),
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.error?.message || 'API request failed');
}
// Parse SSE stream
const reader = response.body?.getReader();
if (!reader)
throw new Error('No response body');
const decoder = new TextDecoder();
let buffer = '';
while (true) {
const { done, value } = await reader.read();
if (done)
break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]')
continue;
try {
const chunk = JSON.parse(data);
yield this.convertResponsesStreamChunk(chunk);
}
catch (_e) {
// Ignore parse errors
}
}
}
}
}
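/**
 * Convert a unified request into Chat Completions format: tool results map
 * to role "tool" messages, tool_use parts map to tool_calls, and multimodal
 * content maps to OpenAI content parts.
 */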
convertToOpenAIFormat(request) {
const model = request.model || this.model;
if (!model) {
throw new Error('Model is required for OpenAI chat completions');
}
const messages = request.messages.map(msg => {
const content = this.normalizeContent(msg.content);
// Special handling for tool_result messages
if (msg.role === 'tool' || content.some(c => c.type === 'tool_result')) {
const toolResults = content.filter(c => c.type === 'tool_result');
if (toolResults.length > 0) {
return toolResults.map(tr => ({
role: 'tool',
content: Array.isArray(tr.content)
? tr.content.map(item => item.type === 'text' ? item.text : '[Non-text content]').join('\n')
: '[Tool result]',
tool_call_id: tr.toolUseId,
}));
}
}
// System messages: OpenAI expects role "system" entries inside the messages array
if (msg.role === 'system') {
return {
role: 'system',
content: content.length === 1 && content[0].type === 'text'
? content[0].text
: content.filter(c => c.type === 'text').map(c => c.text).join('\n') || '[System message]',
};
}
// OpenAI accepts a plain string when the message is a single text part
if (content.length === 1 && content[0].type === 'text') {
return {
role: msg.role,
content: content[0].text,
name: msg.name,
};
}
// Special handling when the content contains tool_use parts
const toolUseContents = content.filter(c => c.type === 'tool_use');
if (toolUseContents.length > 0) {
// Assistant message carrying tool_calls
const textContent = content.filter(c => c.type === 'text').map(c => c.text).join('\n');
return {
role: msg.role,
content: textContent || null,
tool_calls: toolUseContents.map(toolUse => ({
id: toolUse.id,
type: 'function',
function: {
name: toolUse.name,
arguments: JSON.stringify(toolUse.input)
}
})),
name: msg.name,
};
}
// Convert multimodal content
const openAIContent = content.map(c => {
switch (c.type) {
case 'text':
return { type: 'text', text: c.text };
case 'image':
return {
type: 'image_url',
image_url: {
url: c.source.url || `data:${c.source.mediaType};base64,${c.source.data}`,
},
};
default:
return { type: 'text', text: '[Unsupported content type]' };
}
});
return {
role: msg.role,
content: openAIContent,
name: msg.name,
};
}).flat(); // tool_result handling may produce arrays, so flatten
const tools = [
...(request.tools?.map(tool => ({
type: 'function',
function: tool.function,
})) || []),
...(this.tools?.map(func => ({
type: 'function',
function: func.function,
})) || []),
];
return {
model,
messages,
temperature: request.generationConfig?.temperature,
max_tokens: request.generationConfig?.max_tokens,
top_p: request.generationConfig?.top_p,
frequency_penalty: request.generationConfig?.frequencyPenalty,
presence_penalty: request.generationConfig?.presencePenalty,
stop: request.generationConfig?.stopSequences,
tools: tools.length > 0 ? tools : undefined,
tool_choice: request.tool_choice,
response_format: this.convertResponseFormat(request.generationConfig?.responseFormat),
};
}
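/**
 * Normalize the response format: ResponseFormat instances convert via
 * toOpenAI(); the legacy { type: 'json_object', schema } shape is upgraded
 * to the structured-output json_schema format.
 */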
convertResponseFormat(responseFormat) {
if (!responseFormat)
return undefined;
// If it's a ResponseFormat instance, use its toOpenAI method
if (responseFormat instanceof ResponseFormat) {
return responseFormat.toOpenAI();
}
// Handle legacy format for backward compatibility
if (responseFormat.type === 'json_object' && responseFormat.schema) {
// Convert to new structured output format
return {
type: 'json_schema',
json_schema: {
name: 'response',
schema: responseFormat.schema,
strict: true
}
};
}
// Return as-is for other formats
return responseFormat;
}
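/**
 * Convert a unified request into Responses API format. Only the latest
 * message is sent as input; prior turns are meant to be referenced via
 * previous_response_id, which is not managed yet.
 */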
convertToResponsesAPIFormat(request) {
// The Responses API accepts a single string or an array of content parts as input.
// Use the latest message as input; earlier turns are referenced via previous_response_id.
const latestMessage = request.messages[request.messages.length - 1];
const content = this.normalizeContent(latestMessage.content);
let input;
// A single text part is sent as a plain string
if (content.length === 1 && content[0].type === 'text') {
input = content[0].text;
}
else {
// Multimodal content is sent as an array of parts
input = content.map(c => {
switch (c.type) {
case 'text':
return {
type: 'input_text',
text: c.text
};
case 'image':
return {
type: 'input_image',
image_url: {
url: c.source.url || `data:${c.source.mediaType};base64,${c.source.data}`,
},
};
case 'tool_result':
// tool_result parts are sent as tool_result_content
return {
type: 'tool_result_content',
toolUseId: c.toolUseId,
content: Array.isArray(c.content)
? c.content.map(item => item.type === 'text' ? item.text : '[Non-text content]').join('\n')
: '[Tool result]'
};
default:
return {
type: 'input_text',
text: '[Unsupported content type]'
};
}
});
}
const tools = [
...(request.tools?.map(tool => ({
type: 'function',
function: tool.function,
})) || []),
...(this.tools?.map(func => ({
type: 'function',
function: func.function,
})) || []),
];
return {
model: request.model || this.model,
input,
temperature: request.generationConfig?.temperature,
max_output_tokens: request.generationConfig?.max_tokens,
top_p: request.generationConfig?.top_p,
tools: tools.length > 0 ? tools : undefined,
tool_choice: request.tool_choice,
text: request.generationConfig?.responseFormat ? {
format: request.generationConfig.responseFormat
} : undefined,
// TODO: decide how previous_response_id should be managed
previous_response_id: undefined,
store: true,
};
}
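/**
 * Convert a Chat Completions response into the unified response shape,
 * mapping tool_calls back to tool_use content parts.
 */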
convertFromOpenAIFormat(response) {
const choice = response.choices[0];
const message = choice.message;
const content = [];
if (message.content) {
content.push({ type: 'text', text: message.content });
}
if (message.tool_calls) {
message.tool_calls.forEach(toolCall => {
if (toolCall.type === 'function') {
content.push({
type: 'tool_use',
id: toolCall.id,
name: toolCall.function.name,
input: JSON.parse(toolCall.function.arguments),
});
}
});
}
const unifiedMessage = {
id: this.generateMessageId(),
role: message.role,
content,
createdAt: new Date(),
};
const usage = response.usage ? {
inputTokens: response.usage.prompt_tokens,
outputTokens: response.usage.completion_tokens,
totalTokens: response.usage.total_tokens,
} : undefined;
// Extract text for convenience field
const contentArray = Array.isArray(unifiedMessage.content) ? unifiedMessage.content : [{ type: 'text', text: unifiedMessage.content }];
const textContent = contentArray.find((c) => c.type === 'text');
return {
id: response.id,
model: response.model,
provider: 'openai',
message: unifiedMessage,
text: textContent?.text || '',
usage,
finish_reason: choice.finish_reason,
createdAt: new Date(response.created * 1000),
rawResponse: response,
};
}
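/**
 * Convert a Responses API response into the unified response shape.
 */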
convertFromResponsesAPIFormat(response) {
// The Responses API returns messages inside the output array
const outputMessage = response.output?.find((item) => item.type === 'message');
if (!outputMessage) {
throw new Error('No message in response output');
}
const content = [];
// Extract the content parts from outputMessage.content
if (outputMessage.content) {
outputMessage.content.forEach((item) => {
switch (item.type) {
case 'output_text':
content.push({ type: 'text', text: item.text });
break;
case 'tool_use':
content.push({
type: 'tool_use',
id: item.id,
name: item.name,
input: item.input,
});
break;
}
});
}
const unifiedMessage = {
id: outputMessage.id || this.generateMessageId(),
role: outputMessage.role || 'assistant',
content,
createdAt: new Date(),
};
const usage = response.usage ? {
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
totalTokens: response.usage.total_tokens,
} : undefined;
// Extract text for convenience field
const contentArray = Array.isArray(unifiedMessage.content) ? unifiedMessage.content : [{ type: 'text', text: unifiedMessage.content }];
const textContent = contentArray.find((c) => c.type === 'text');
return {
id: response.id,
model: response.model,
provider: 'openai',
message: unifiedMessage,
text: textContent?.text || '',
usage,
finish_reason: outputMessage.status === 'completed' ? 'stop' : undefined,
createdAt: new Date(response.createdAt * 1000),
rawResponse: response,
};
}
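/**
 * Convert a Chat Completions stream chunk into a unified chunk. Tool-call
 * deltas surface as partial tool_use entries; full accumulation happens in
 * streamWithChatCompletions.
 */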
convertStreamChunk(chunk) {
const choice = chunk.choices[0];
const delta = choice.delta;
const content = [];
if (delta.content) {
content.push({ type: 'text', text: delta.content });
}
// Handle tool calls in streaming chunks
if (delta.tool_calls) {
for (const toolCallDelta of delta.tool_calls) {
// In streaming, we get partial tool calls, so we need to indicate this is a partial update
// The actual accumulation and execution happens in streamWithChatCompletions
content.push({
type: 'tool_use',
id: toolCallDelta.id || `partial-${toolCallDelta.index}`,
name: toolCallDelta.function?.name || '',
input: {}, // Input will be accumulated in streamWithChatCompletions
});
}
}
const unifiedMessage = {
id: this.generateMessageId(),
role: delta.role || 'assistant',
content,
createdAt: new Date(),
};
// Extract text for convenience field
const contentArray = Array.isArray(unifiedMessage.content) ? unifiedMessage.content : [{ type: 'text', text: unifiedMessage.content }];
const textContent = contentArray.find((c) => c.type === 'text');
return {
id: chunk.id,
model: chunk.model,
provider: 'openai',
message: unifiedMessage,
text: textContent?.text || '',
finish_reason: choice.finish_reason,
createdAt: new Date(chunk.created * 1000),
rawResponse: chunk,
};
}
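/**
 * Convert a Responses API stream chunk into a unified chunk.
 */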
convertResponsesStreamChunk(chunk) {
// Handle the Responses API streaming format
const content = [];
if (chunk.delta?.content) {
content.push({ type: 'text', text: chunk.delta.content });
}
const unifiedMessage = {
id: this.generateMessageId(),
role: chunk.delta?.role || 'assistant',
content,
createdAt: new Date(),
};
// Extract text for convenience field
const contentArray = Array.isArray(unifiedMessage.content) ? unifiedMessage.content : [{ type: 'text', text: unifiedMessage.content }];
const textContent = contentArray.find((c) => c.type === 'text');
return {
id: chunk.id,
model: chunk.model,
provider: 'openai',
message: unifiedMessage,
text: textContent?.text || '',
finish_reason: chunk.status === 'completed' ? 'stop' : undefined,
createdAt: new Date(chunk.createdAt || Date.now()),
rawResponse: chunk,
};
}
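/**
 * Normalize SDK and unknown errors into the unified error shape.
 */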
handleError(error) {
if (error instanceof OpenAI.APIError) {
return {
code: error.code || 'openai_error',
message: error.message,
type: this.mapErrorType(error.status),
statusCode: error.status,
provider: 'openai',
details: error,
};
}
return {
code: 'unknown_error',
message: error.message || 'Unknown error occurred',
type: 'api_error',
provider: 'openai',
details: error,
};
}
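/**
 * Map an HTTP status to a unified error type: 429 → rate_limit,
 * 401 → authentication, other 4xx → invalid_request, 5xx → server_error.
 */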
mapErrorType(status) {
if (!status)
return 'api_error';
if (status === 429)
return 'rate_limit';
if (status === 401)
return 'authentication';
if (status >= 400 && status < 500)
return 'invalid_request';
if (status >= 500)
return 'server_error';
return 'api_error';
}
}