@unified-llm/core
Unified LLM interface.
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIProvider = void 0;
const openai_1 = __importDefault(require("openai"));
const validation_1 = require("../../utils/validation");
const base_provider_1 = __importDefault(require("../base-provider"));
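/**
 * OpenAI provider for the unified LLM interface. Routes requests either to
 * the Chat Completions API via the official SDK, or to the newer Responses
 * API via raw HTTP (see the NOTE in chatWithResponsesAPI).
 */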
class OpenAIProvider extends base_provider_1.default {
constructor({ apiKey, model, tools, options }) {
super({ model: model, tools });
this.apiKey = apiKey;
this.useResponsesAPI = (options === null || options === void 0 ? void 0 : options.useResponsesAPI) || false;
this.client = new openai_1.default({ apiKey });
}
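    /**
     * Send a chat request. Validates it, then dispatches to the Responses API
     * or Chat Completions depending on the `useResponsesAPI` option.
     */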
async chat(request) {
(0, validation_1.validateChatRequest)(request);
try {
if (this.useResponsesAPI) {
                // Await here so async failures are caught and normalized by handleError below.
                return await this.chatWithResponsesAPI(request);
}
else {
                return await this.chatWithChatCompletions(request);
}
}
catch (error) {
throw this.handleError(error);
}
}
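    /**
     * Chat Completions flow. While the model finishes with `tool_calls` and
     * local tools are registered, each call is executed and its result is
     * appended as a `role: "tool"` message before re-requesting.
     */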
async chatWithChatCompletions(request) {
if (!request.model && !this.model) {
throw new Error('Model is required for OpenAI chat completions');
}
const openAIRequest = this.convertToOpenAIFormat(request);
let response = await this.client.chat.completions.create(openAIRequest);
let messages = [...openAIRequest.messages];
        // If the model requested tool calls, execute them and feed the results back
while (response.choices[0].finish_reason === 'tool_calls' && this.tools) {
const toolCalls = response.choices[0].message.tool_calls;
const toolResults = [];
if (toolCalls) {
for (const toolCall of toolCalls) {
if (toolCall.type === 'function') {
const customFunction = this.tools.find(func => func.function.name === toolCall.function.name);
if (customFunction) {
try {
                                // Merge the CustomFunction's preset args with the model-supplied call arguments
const mergedArgs = {
...(customFunction.args || {}),
...JSON.parse(toolCall.function.arguments)
};
const result = await customFunction.handler(mergedArgs);
toolResults.push({
role: 'tool',
content: typeof result === 'string' ? result : JSON.stringify(result),
tool_call_id: toolCall.id,
});
}
catch (error) {
toolResults.push({
role: 'tool',
content: error instanceof Error ? error.message : 'Unknown error',
tool_call_id: toolCall.id,
});
}
}
}
}
}
            // Re-send the request with the tool execution results included
if (toolResults.length > 0) {
messages = [
...messages,
response.choices[0].message,
...toolResults,
];
const followUpRequest = {
...openAIRequest,
messages,
};
response = await this.client.chat.completions.create(followUpRequest);
}
else {
break;
}
}
return this.convertFromOpenAIFormat(response);
}
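    /**
     * Responses API flow, implemented with a raw fetch call (see NOTE below).
     */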
async chatWithResponsesAPI(request) {
var _a;
// NOTE: Responses API is not yet available in the OpenAI SDK
// This implementation uses the raw HTTP client to call the new API
const responsesRequest = this.convertToResponsesAPIFormat(request);
// Make raw HTTP request to the Responses API
const response = await fetch('https://api.openai.com/v1/responses', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
'OpenAI-Beta': 'responses-v1',
},
body: JSON.stringify(responsesRequest),
});
if (!response.ok) {
const error = await response.json();
throw new Error(((_a = error.error) === null || _a === void 0 ? void 0 : _a.message) || 'API request failed');
}
const data = await response.json();
return this.convertFromResponsesAPIFormat(data);
}
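    /**
     * Stream a chat response, dispatching on `useResponsesAPI` like chat().
     */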
async *stream(request) {
(0, validation_1.validateChatRequest)(request);
if (this.useResponsesAPI) {
yield* this.streamWithResponsesAPI(request);
}
else {
yield* this.streamWithChatCompletions(request);
}
}
async *streamWithChatCompletions(request) {
const openAIRequest = this.convertToOpenAIFormat(request);
const stream = await this.client.chat.completions.create({
...openAIRequest,
stream: true,
});
for await (const chunk of stream) {
yield this.convertStreamChunk(chunk);
}
}
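    /**
     * Stream via the Responses API. The body is read as server-sent events:
     * bytes are buffered, split on newlines, and each `data: ` payload is
     * parsed as a JSON chunk until the `[DONE]` sentinel.
     */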
async *streamWithResponsesAPI(request) {
var _a, _b;
const responsesRequest = this.convertToResponsesAPIFormat(request);
// Make raw HTTP request to the Responses API with streaming
const response = await fetch('https://api.openai.com/v1/responses', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
'Accept': 'text/event-stream',
'OpenAI-Beta': 'responses-v1',
},
body: JSON.stringify({
...responsesRequest,
stream: true,
}),
});
if (!response.ok) {
const error = await response.json();
throw new Error(((_a = error.error) === null || _a === void 0 ? void 0 : _a.message) || 'API request failed');
}
// Parse SSE stream
const reader = (_b = response.body) === null || _b === void 0 ? void 0 : _b.getReader();
if (!reader)
throw new Error('No response body');
const decoder = new TextDecoder();
let buffer = '';
while (true) {
const { done, value } = await reader.read();
if (done)
break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop() || '';
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]')
continue;
try {
const chunk = JSON.parse(data);
yield this.convertResponsesStreamChunk(chunk);
}
catch (_e) {
// Ignore parse errors
}
}
}
}
}
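    /**
     * Convert a unified request to Chat Completions format: tool_result parts
     * become `role: "tool"` messages, tool_use parts become `tool_calls`, and
     * a lone text part collapses to a plain string.
     */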
convertToOpenAIFormat(request) {
        var _a, _b, _c, _d, _f, _g, _m;
const model = request.model || this.model;
if (!model) {
throw new Error('Model is required for OpenAI chat completions');
}
const messages = request.messages.map(msg => {
const content = this.normalizeContent(msg.content);
            // Special handling for tool_result messages
if (msg.role === 'tool' || content.some(c => c.type === 'tool_result')) {
const toolResults = content.filter(c => c.type === 'tool_result');
if (toolResults.length > 0) {
return toolResults.map(tr => ({
role: 'tool',
content: Array.isArray(tr.content)
? tr.content.map(item => item.type === 'text' ? item.text : '[Non-text content]').join('\n')
: '[Tool result]',
tool_call_id: tr.tool_use_id,
}));
}
}
            // System messages: OpenAI expects them as role: "system" entries inside the messages array
if (msg.role === 'system') {
return {
role: 'system',
content: content.length === 1 && content[0].type === 'text'
? content[0].text
: content.filter(c => c.type === 'text').map(c => c.text).join('\n') || '[System message]',
};
}
            // OpenAI accepts a single text message as a plain string
if (content.length === 1 && content[0].type === 'text') {
return {
role: msg.role,
content: content[0].text,
name: msg.name,
};
}
            // Special handling when the message carries tool_use content
const toolUseContents = content.filter(c => c.type === 'tool_use');
if (toolUseContents.length > 0) {
                // An assistant message that includes tool_calls
const textContent = content.filter(c => c.type === 'text').map(c => c.text).join('\n');
return {
role: msg.role,
content: textContent || null,
tool_calls: toolUseContents.map(toolUse => ({
id: toolUse.id,
type: 'function',
function: {
name: toolUse.name,
arguments: JSON.stringify(toolUse.input)
}
})),
name: msg.name,
};
}
            // Convert multimodal content parts
const openAIContent = content.map(c => {
switch (c.type) {
case 'text':
return { type: 'text', text: c.text };
case 'image':
return {
type: 'image_url',
image_url: {
url: c.source.url || `data:${c.source.media_type};base64,${c.source.data}`,
},
};
default:
return { type: 'text', text: '[Unsupported content type]' };
}
});
return {
role: msg.role,
content: openAIContent,
name: msg.name,
};
        }).flat(); // tool_result handling can return an array of messages, so flatten
        // Build the merged tool list once: request-level tools plus constructor tools.
        const allTools = [
            ...((request.tools || []).map(tool => ({
                type: 'function',
                function: tool.function,
            }))),
            ...((this.tools || []).map(func => ({
                type: 'function',
                function: func.function,
            }))),
        ];
        return {
            model: model,
            messages,
            temperature: (_a = request.generation_config) === null || _a === void 0 ? void 0 : _a.temperature,
            max_tokens: (_b = request.generation_config) === null || _b === void 0 ? void 0 : _b.max_tokens,
            top_p: (_c = request.generation_config) === null || _c === void 0 ? void 0 : _c.top_p,
            frequency_penalty: (_d = request.generation_config) === null || _d === void 0 ? void 0 : _d.frequency_penalty,
            presence_penalty: (_f = request.generation_config) === null || _f === void 0 ? void 0 : _f.presence_penalty,
            stop: (_g = request.generation_config) === null || _g === void 0 ? void 0 : _g.stop_sequences,
            tools: allTools.length > 0 ? allTools : undefined,
            tool_choice: request.tool_choice,
            response_format: (_m = request.generation_config) === null || _m === void 0 ? void 0 : _m.response_format,
        };
}
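    /**
     * Convert a unified request to Responses API format. Only the latest
     * message is sent as `input`; multi-part content maps to typed items
     * (input_text, input_image, tool_result_content).
     */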
convertToResponsesAPIFormat(request) {
        var _a, _b, _c, _j;
        // The Responses API takes a single message (or an array of content parts) as `input`.
        // The latest message is used as `input`; earlier messages are referenced via previous_response_id.
const latestMessage = request.messages[request.messages.length - 1];
const content = this.normalizeContent(latestMessage.content);
let input;
        // A single text message is sent as a plain string
if (content.length === 1 && content[0].type === 'text') {
input = content[0].text;
}
else {
            // Multimodal content is sent as an array of typed parts
input = content.map(c => {
switch (c.type) {
case 'text':
return {
type: 'input_text',
text: c.text
};
case 'image':
return {
type: 'input_image',
image_url: {
url: c.source.url || `data:${c.source.media_type};base64,${c.source.data}`,
},
};
case 'tool_result':
                    // tool_result parts are sent as tool_result_content
return {
type: 'tool_result_content',
tool_use_id: c.tool_use_id,
content: Array.isArray(c.content)
? c.content.map(item => item.type === 'text' ? item.text : '[Non-text content]').join('\n')
: '[Tool result]'
};
default:
return {
type: 'input_text',
text: '[Unsupported content type]'
};
}
});
}
        // Build the merged tool list once, as in convertToOpenAIFormat.
        const allTools = [
            ...((request.tools || []).map(tool => ({
                type: 'function',
                function: tool.function,
            }))),
            ...((this.tools || []).map(func => ({
                type: 'function',
                function: func.function,
            }))),
        ];
        return {
            model: request.model || this.model,
            input,
            temperature: (_a = request.generation_config) === null || _a === void 0 ? void 0 : _a.temperature,
            max_output_tokens: (_b = request.generation_config) === null || _b === void 0 ? void 0 : _b.max_tokens,
            top_p: (_c = request.generation_config) === null || _c === void 0 ? void 0 : _c.top_p,
            tools: allTools.length > 0 ? allTools : undefined,
tool_choice: request.tool_choice,
text: ((_j = request.generation_config) === null || _j === void 0 ? void 0 : _j.response_format) ? {
format: request.generation_config.response_format
} : undefined,
            // TODO: decide how previous_response_id should be tracked
previous_response_id: undefined,
store: true,
};
}
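    /**
     * Map a Chat Completions response to the unified shape: text and tool_use
     * content, normalized usage, and an epoch-seconds timestamp.
     */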
convertFromOpenAIFormat(response) {
const choice = response.choices[0];
const message = choice.message;
const content = [];
if (message.content) {
content.push({ type: 'text', text: message.content });
}
if (message.tool_calls) {
message.tool_calls.forEach(toolCall => {
if (toolCall.type === 'function') {
content.push({
type: 'tool_use',
id: toolCall.id,
name: toolCall.function.name,
input: JSON.parse(toolCall.function.arguments),
});
}
});
}
const unifiedMessage = {
id: this.generateMessageId(),
role: message.role,
content,
created_at: new Date(),
};
const usage = response.usage ? {
input_tokens: response.usage.prompt_tokens,
output_tokens: response.usage.completion_tokens,
total_tokens: response.usage.total_tokens,
} : undefined;
return {
id: response.id,
model: response.model,
provider: 'openai',
message: unifiedMessage,
usage,
finish_reason: choice.finish_reason,
created_at: new Date(response.created * 1000),
raw_response: response,
};
}
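    /**
     * Map a Responses API response to the unified shape by extracting the
     * first `message` item from the `output` array.
     */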
convertFromResponsesAPIFormat(response) {
var _a;
        // The Responses API returns its messages inside an `output` array
const outputMessage = (_a = response.output) === null || _a === void 0 ? void 0 : _a.find((item) => item.type === 'message');
if (!outputMessage) {
throw new Error('No message in response output');
}
const content = [];
        // Extract the content parts from outputMessage.content
if (outputMessage.content) {
outputMessage.content.forEach((item) => {
switch (item.type) {
case 'output_text':
content.push({ type: 'text', text: item.text });
break;
case 'tool_use':
content.push({
type: 'tool_use',
id: item.id,
name: item.name,
input: item.input,
});
break;
}
});
}
const unifiedMessage = {
id: outputMessage.id || this.generateMessageId(),
role: outputMessage.role || 'assistant',
content,
created_at: new Date(),
};
const usage = response.usage ? {
input_tokens: response.usage.input_tokens,
output_tokens: response.usage.output_tokens,
total_tokens: response.usage.total_tokens,
} : undefined;
return {
id: response.id,
model: response.model,
provider: 'openai',
message: unifiedMessage,
usage,
finish_reason: outputMessage.status === 'completed' ? 'stop' : undefined,
created_at: new Date(response.created_at * 1000),
raw_response: response,
};
}
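    /**
     * Map a Chat Completions stream delta to a unified chunk. Only text
     * deltas are surfaced here; the full chunk rides along in raw_response.
     */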
convertStreamChunk(chunk) {
const choice = chunk.choices[0];
const delta = choice.delta;
const content = [];
if (delta.content) {
content.push({ type: 'text', text: delta.content });
}
const unifiedMessage = {
id: this.generateMessageId(),
role: delta.role || 'assistant',
content,
created_at: new Date(),
};
return {
id: chunk.id,
model: chunk.model,
provider: 'openai',
message: unifiedMessage,
finish_reason: choice.finish_reason,
created_at: new Date(chunk.created * 1000),
raw_response: chunk,
};
}
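    /**
     * Map a Responses API stream chunk to a unified chunk.
     */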
convertResponsesStreamChunk(chunk) {
var _a, _b;
        // Handle the Responses API streaming format
const content = [];
if ((_a = chunk.delta) === null || _a === void 0 ? void 0 : _a.content) {
content.push({ type: 'text', text: chunk.delta.content });
}
const unifiedMessage = {
id: this.generateMessageId(),
role: ((_b = chunk.delta) === null || _b === void 0 ? void 0 : _b.role) || 'assistant',
content,
created_at: new Date(),
};
return {
id: chunk.id,
model: chunk.model,
provider: 'openai',
message: unifiedMessage,
finish_reason: chunk.status === 'completed' ? 'stop' : undefined,
            created_at: chunk.created_at ? new Date(chunk.created_at * 1000) : new Date(), // created_at is epoch seconds, as in the non-streaming path
raw_response: chunk,
};
}
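    /**
     * Normalize SDK APIErrors and unknown errors into the unified error shape.
     */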
handleError(error) {
if (error instanceof openai_1.default.APIError) {
return {
code: error.code || 'openai_error',
message: error.message,
type: this.mapErrorType(error.status),
status_code: error.status,
provider: 'openai',
details: error,
};
}
return {
code: 'unknown_error',
message: error.message || 'Unknown error occurred',
type: 'api_error',
provider: 'openai',
details: error,
};
}
mapErrorType(status) {
if (!status)
return 'api_error';
if (status === 429)
return 'rate_limit';
if (status === 401)
return 'authentication';
if (status >= 400 && status < 500)
return 'invalid_request';
if (status >= 500)
return 'server_error';
return 'api_error';
}
}
exports.OpenAIProvider = OpenAIProvider;
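// Usage (a minimal sketch, not part of this file: the root export path, the
// model name, and the env var are assumptions; the request shape follows the
// conversions above):
//
//   const { OpenAIProvider } = require('@unified-llm/core');
//   const provider = new OpenAIProvider({
//     apiKey: process.env.OPENAI_API_KEY,
//     model: 'gpt-4o',
//   });
//   const res = await provider.chat({
//     messages: [{ role: 'user', content: 'Hello!' }],
//   });
//   console.log(res.message.content);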
//# sourceMappingURL=provider.js.map