@robota-sdk/openai
OpenAI integration for Robota SDK - GPT-4, GPT-3.5, function calling, and tool integration with OpenAI's API
438 lines (434 loc) • 14.3 kB
JavaScript
import OpenAI from 'openai';
import { BaseAIProvider, SilentLogger } from '@robota-sdk/agents';
// src/provider.ts
var OpenAIResponseParser = class {
logger;
constructor(logger) {
this.logger = logger || SilentLogger;
}
/**
* Parse complete OpenAI chat completion response
*
* @param response - Raw OpenAI API response
* @returns Standardized universal message
*/
parseResponse(response) {
try {
const choice = response.choices?.[0];
if (!choice) {
throw new Error("No choices found in OpenAI response");
}
const message = choice.message;
const content = message.content || "";
const toolCalls = message.tool_calls?.map((toolCall) => ({
id: toolCall.id,
type: "function",
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
})) || [];
const usage = response.usage ? {
promptTokens: response.usage.prompt_tokens,
completionTokens: response.usage.completion_tokens,
totalTokens: response.usage.total_tokens
      } : undefined;
const result = {
role: "assistant",
content,
        timestamp: new Date(),
...toolCalls.length > 0 && { toolCalls },
...usage && { usage },
metadata: {
          finishReason: choice.finish_reason || undefined
}
};
return result;
} catch (error) {
const errorMessage = error instanceof Error ? error.message : "OpenAI response parsing failed";
this.logger.error("Response parsing failed", { error: errorMessage });
throw new Error(`OpenAI response parsing failed: ${errorMessage}`);
}
}
/**
* Parse OpenAI streaming chunk
*
* @param chunk - Raw streaming chunk from OpenAI API
* @returns Parsed universal message or null if no content
*/
parseStreamingChunk(chunk) {
try {
const choice = chunk.choices?.[0];
if (!choice) {
return null;
}
const delta = choice.delta;
const finishReason = choice.finish_reason;
if (delta.tool_calls) {
const toolCalls = delta.tool_calls.map((toolCall) => ({
id: toolCall.id || "",
type: "function",
function: {
name: toolCall.function?.name || "",
arguments: toolCall.function?.arguments || ""
}
}));
return {
role: "assistant",
content: "",
          timestamp: new Date(),
toolCalls,
metadata: {
isStreamChunk: true,
isComplete: finishReason === "stop" || finishReason === "tool_calls"
}
};
}
const content = delta.content || "";
return {
role: "assistant",
content,
        timestamp: new Date(),
metadata: {
isStreamChunk: true,
isComplete: finishReason === "stop" || finishReason === "tool_calls"
}
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : "OpenAI chunk parsing failed";
this.logger.error("Chunk parsing failed", { error: errorMessage });
throw new Error(`OpenAI chunk parsing failed: ${errorMessage}`);
}
}
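  // Illustrative chunk handling (assumed payload shapes): a text delta such as
  // { choices: [{ delta: { content: "Hi" }, finish_reason: null }] } yields a
  // message with content "Hi" and metadata.isComplete === false, while a
  // tool-call delta yields an empty-content message carrying toolCalls. A
  // chunk whose finish_reason is "stop" or "tool_calls" is marked complete.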
};
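// Sketch (illustrative, not part of the published bundle): running a minimal
// chat.completions-style payload through the parser. The literal payload below
// is an assumption for demonstration; real responses come from the OpenAI API.
function exampleParseResponse() {
  const parser = new OpenAIResponseParser();
  return parser.parseResponse({
    choices: [{ message: { content: "Hello!" }, finish_reason: "stop" }],
    usage: { prompt_tokens: 12, completion_tokens: 3, total_tokens: 15 }
  });
  // -> { role: "assistant", content: "Hello!",
  //      usage: { promptTokens: 12, completionTokens: 3, totalTokens: 15 },
  //      metadata: { finishReason: "stop" }, timestamp: <Date> }
}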
var OpenAIProvider = class extends BaseAIProvider {
name = "openai";
version = "1.0.0";
client;
options;
payloadLogger;
responseParser;
logger;
constructor(options) {
super();
this.options = options;
if (options.client) {
this.client = options.client;
} else if (options.apiKey) {
this.client = new OpenAI({
apiKey: options.apiKey,
...options.organization && { organization: options.organization },
...options.timeout && { timeout: options.timeout },
...options.baseURL && { baseURL: options.baseURL }
});
} else {
throw new Error("Either OpenAI client or apiKey is required");
}
this.logger = options.logger || SilentLogger;
this.responseParser = new OpenAIResponseParser(this.logger);
    this.payloadLogger = this.initializePayloadLogger(options) ?? undefined;
}
/**
* Initialize payload logger
*/
initializePayloadLogger(options) {
return options.payloadLogger;
}
/**
* Generate response using UniversalMessage
*/
async chat(messages, options) {
this.validateMessages(messages);
try {
const openaiMessages = this.convertToOpenAIMessages(messages);
if (!options?.model) {
throw new Error("Model is required in ChatOptions. Please specify a model in defaultModel configuration.");
}
const requestParams = {
model: options.model,
messages: openaiMessages,
        ...options?.temperature !== undefined && { temperature: options.temperature },
...options?.maxTokens && { max_tokens: options.maxTokens },
...options?.tools && {
tools: this.convertToOpenAITools(options.tools),
tool_choice: "auto"
}
};
if (this.payloadLogger?.isEnabled()) {
const logData = {
model: requestParams.model,
messagesCount: openaiMessages.length,
hasTools: !!requestParams.tools,
          temperature: requestParams.temperature ?? undefined,
          maxTokens: requestParams.max_tokens ?? undefined,
          timestamp: new Date().toISOString()
};
await this.payloadLogger.logPayload(logData, "chat");
}
const response = await this.client.chat.completions.create(requestParams);
return this.responseParser.parseResponse(response);
} catch (error) {
const openaiError = error;
const errorMessage = openaiError.message || "OpenAI API request failed";
throw new Error(`OpenAI chat failed: ${errorMessage}`);
}
}
/**
* Generate streaming response using UniversalMessage
*/
async *chatStream(messages, options) {
this.validateMessages(messages);
try {
const openaiMessages = this.convertToOpenAIMessages(messages);
if (!options?.model) {
throw new Error("Model is required in ChatOptions. Please specify a model in defaultModel configuration.");
}
const requestParams = {
model: options.model,
messages: openaiMessages,
stream: true,
        ...options?.temperature !== undefined && { temperature: options.temperature },
...options?.maxTokens && { max_tokens: options.maxTokens },
...options?.tools && {
tools: this.convertToOpenAITools(options.tools),
tool_choice: "auto"
}
};
if (this.payloadLogger?.isEnabled()) {
const logData = {
model: requestParams.model,
messagesCount: openaiMessages.length,
hasTools: !!requestParams.tools,
          temperature: requestParams.temperature ?? undefined,
          maxTokens: requestParams.max_tokens ?? undefined,
          timestamp: new Date().toISOString()
};
await this.payloadLogger.logPayload(logData, "stream");
}
const stream = await this.client.chat.completions.create(requestParams);
for await (const chunk of stream) {
const universalMessage = this.responseParser.parseStreamingChunk(chunk);
if (universalMessage) {
yield universalMessage;
}
}
} catch (error) {
const openaiError = error;
const errorMessage = openaiError.message || "OpenAI API request failed";
throw new Error(`OpenAI stream failed: ${errorMessage}`);
}
}
supportsTools() {
return true;
}
validateConfig() {
return !!this.client && !!this.options;
}
async dispose() {
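    // Nothing to clean up here; the underlying OpenAI client does not require
    // explicit teardown.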
}
/**
* Convert UniversalMessage array to OpenAI format
*/
convertToOpenAIMessages(messages) {
return messages.map((msg) => {
switch (msg.role) {
case "user":
return {
role: "user",
content: msg.content || ""
};
case "assistant": {
const assistantMsg = msg;
if (assistantMsg.toolCalls && assistantMsg.toolCalls.length > 0) {
return {
role: "assistant",
// IMPORTANT: Preserve null for tool calls as per OpenAI API spec
content: assistantMsg.content === "" ? null : assistantMsg.content || null,
tool_calls: assistantMsg.toolCalls.map((toolCall) => ({
id: toolCall.id,
type: "function",
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
}))
};
}
return {
role: "assistant",
content: msg.content || ""
};
}
case "system":
return {
role: "system",
content: msg.content || ""
};
case "tool":
return {
role: "tool",
content: msg.content || "",
tool_call_id: msg.toolCallId || ""
};
default:
throw new Error(`Unsupported message role: ${msg.role}`);
}
});
}
/**
* Convert tool schemas to OpenAI format
*/
convertToOpenAITools(tools) {
return tools.map((tool) => ({
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.parameters
}
}));
}
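  // Example (illustrative) input for convertToOpenAITools; the tool name and
  // JSON-schema parameters below are assumptions for demonstration:
  //   { name: "get_weather", description: "Look up current weather",
  //     parameters: { type: "object", properties: { city: { type: "string" } },
  //                   required: ["city"] } }
  // maps to { type: "function", function: { name, description, parameters } },
  // the shape the chat.completions endpoint expects under "tools".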
/**
* Validate messages before sending to API
*
* IMPORTANT: OpenAI API Content Handling Policy
* =============================================
*
* Based on OpenAI API documentation and community feedback:
*
* 1. When sending TO OpenAI API:
* - Assistant messages with tool_calls: content MUST be null (not empty string)
* - Regular assistant messages: content can be string or null
* - This prevents "400 Bad Request" errors
*
   * 2. When returning messages FROM this provider (UniversalMessage):
* - All messages must have content as string (TypeScript requirement)
* - Convert null to empty string for type compatibility
*
* 3. This dual handling ensures:
* - OpenAI API compatibility (null for tool calls)
* - TypeScript type safety (string content in UniversalMessage)
* - No infinite loops in tool execution
*
* Reference: OpenAI Community discussions confirm that tool_calls
* require content to be null, not empty string.
*/
validateMessages(messages) {
super.validateMessages(messages);
for (const message of messages) {
if (message.role === "assistant") {
const assistantMsg = message;
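        // Assistant messages that carry tool calls may legitimately have empty
        // content here; convertToOpenAIMessages() maps that empty string to
        // null before the request is sent, as the OpenAI API requires.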
if (assistantMsg.toolCalls && assistantMsg.toolCalls.length > 0 && assistantMsg.content === "") {
continue;
}
}
}
}
};
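// Usage sketch (illustrative; not part of the published bundle). Assumes an
// OPENAI_API_KEY environment variable; "gpt-4o-mini" is an assumed model name,
// so substitute any chat-completions model your account supports.
async function exampleProviderUsage() {
  const provider = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });
  const messages = [
    { role: "user", content: "Say hello in one word.", timestamp: new Date() }
  ];
  // One-shot completion: resolves to a single UniversalMessage.
  const reply = await provider.chat(messages, { model: "gpt-4o-mini" });
  // Streaming: each chunk is a UniversalMessage with metadata.isStreamChunk.
  for await (const chunk of provider.chatStream(messages, { model: "gpt-4o-mini" })) {
    process.stdout.write(chunk.content);
  }
  return reply;
}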
// src/adapter.ts
var OpenAIConversationAdapter = class {
/**
* Filter messages for OpenAI compatibility
*
* OpenAI has specific requirements:
* - Tool messages must have valid toolCallId
* - Messages must be in proper sequence
* - Tool messages without toolCallId should be excluded
*/
static filterMessagesForOpenAI(messages) {
return messages.filter((msg) => {
if (msg.role === "user" || msg.role === "assistant" || msg.role === "system") {
return true;
}
if (msg.role === "tool") {
const toolMsg = msg;
return !!(toolMsg.toolCallId && toolMsg.toolCallId.trim() !== "" && toolMsg.toolCallId !== "unknown");
}
return false;
});
}
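  // Example (illustrative): a tool message whose toolCallId is missing, blank,
  // or the literal "unknown" is dropped by the filter above, while user,
  // assistant, and system messages always pass through unchanged.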
/**
* Convert UniversalMessage array to OpenAI message format
* Now properly handles tool messages for OpenAI's tool calling feature
*/
static toOpenAIFormat(messages) {
const filteredMessages = this.filterMessagesForOpenAI(messages);
return filteredMessages.map((msg) => this.convertMessage(msg));
}
/**
* Convert a single UniversalMessage to OpenAI format
* Handles all message types including tool messages
*/
static convertMessage(msg) {
const messageRole = msg.role;
if (messageRole === "user") {
const userMsg = msg;
return {
role: "user",
content: userMsg.content || ""
};
}
if (messageRole === "assistant") {
const assistantMsg = msg;
if (assistantMsg.toolCalls && assistantMsg.toolCalls.length > 0) {
const result = {
role: "assistant",
// CRITICAL: OpenAI API requires content to be null (not empty string) when tool_calls are present
// VERIFIED: 2024-12 - This prevents "400 Bad Request" errors from OpenAI API
// DO NOT CHANGE without testing against actual OpenAI API
content: assistantMsg.content === "" ? null : assistantMsg.content || null,
tool_calls: assistantMsg.toolCalls.map((toolCall) => ({
id: toolCall.id,
type: "function",
function: {
name: toolCall.function.name,
arguments: toolCall.function.arguments
}
}))
};
return result;
}
      return {
        role: "assistant",
        // Normalize: empty string and null both become null; only undefined
        // content falls back to "".
        content: assistantMsg.content === undefined ? "" : assistantMsg.content || null
      };
}
if (messageRole === "system") {
const systemMsg = msg;
return {
role: "system",
content: systemMsg.content || ""
};
}
if (messageRole === "tool") {
const toolMsg = msg;
if (!toolMsg.toolCallId || toolMsg.toolCallId.trim() === "") {
throw new Error(`Tool message missing toolCallId: ${JSON.stringify(toolMsg)}`);
}
const result = {
role: "tool",
content: toolMsg.content || "",
tool_call_id: toolMsg.toolCallId
};
return result;
}
throw new Error(`Unsupported message role: ${msg.role}`);
}
/**
* Add system prompt to message array if needed
*/
static addSystemPromptIfNeeded(messages, systemPrompt) {
if (!systemPrompt) {
return messages;
}
const hasSystemMessage = messages.some((msg) => msg.role === "system");
if (hasSystemMessage) {
return messages;
}
return [
{ role: "system", content: systemPrompt },
...messages
];
}
};
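// Sketch (illustrative): preparing a conversation for the OpenAI API with the
// adapter's static helpers. The message and prompt literals are assumptions
// for demonstration.
function exampleAdapterUsage() {
  const history = [
    { role: "user", content: "What is 2 + 2?", timestamp: new Date() }
  ];
  const withSystem = OpenAIConversationAdapter.addSystemPromptIfNeeded(
    history,
    "You are a concise math tutor."
  );
  // -> [{ role: "system", ... }, { role: "user", ... }] in OpenAI wire format
  return OpenAIConversationAdapter.toOpenAIFormat(withSystem);
}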
export { OpenAIConversationAdapter, OpenAIProvider };