@unified-llm/core
Unified LLM interface (in-memory).
JavaScript
import OpenAI from 'openai';
import BaseProvider from '../base-provider.js';
import { OpenAIAgentProvider } from './agent-provider.js';
import { OpenAICompletionProvider } from './completion-provider.js';
import { OpenAIResponsesProvider } from './responses-provider.js';
export class OpenAIProvider extends BaseProvider {
    constructor(options) {
        super({ model: options.model, tools: options.tools });
        if (options.mcpServers) {
            // Build a per-provider OpenAI client to inject into the Agents SDK
            const client = new OpenAI({ apiKey: options.apiKey, baseURL: options.baseURL });
            this.provider = new OpenAIAgentProvider({
                client,
                model: options.model,
                tools: options.tools,
                mcpServers: options.mcpServers,
                // Default to the Responses API for Agents; this could be made configurable
                openaiApi: 'responses',
                logLevel: options.logLevel,
            });
        }
        else {
            // No MCP servers: choose between the Responses API and Chat Completions
            if (options.options?.useResponsesAPI) {
                this.provider = new OpenAIResponsesProvider({
                    apiKey: options.apiKey,
                    model: options.model,
                    baseURL: options.baseURL,
                    tools: options.tools,
                    logLevel: options.logLevel,
                });
            }
            else {
                this.provider = new OpenAICompletionProvider({
                    apiKey: options.apiKey,
                    model: options.model,
                    baseURL: options.baseURL,
                    tools: options.tools,
                    logLevel: options.logLevel,
                });
            }
        }
    }
    // Both entry points delegate to whichever underlying provider was selected.
    async chat(request) {
        return this.provider.chat(request);
    }
    async *stream(request) {
        yield* this.provider.stream(request);
    }
}
//# sourceMappingURL=provider.js.map
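
For context, a minimal usage sketch of the three dispatch paths in the constructor above. The option names (apiKey, model, mcpServers, options.useResponsesAPI) come straight from the code; the model name, the MCP server entry, and the chat request shape are illustrative assumptions, not anything this file defines.

import { OpenAIProvider } from './provider.js';

// Default path: no mcpServers and no useResponsesAPI flag, so the
// constructor selects OpenAICompletionProvider (Chat Completions API).
const completions = new OpenAIProvider({
    apiKey: process.env.OPENAI_API_KEY,
    model: 'gpt-4o-mini', // assumed model name, for illustration only
});

// The nested options.useResponsesAPI flag selects OpenAIResponsesProvider.
const responses = new OpenAIProvider({
    apiKey: process.env.OPENAI_API_KEY,
    model: 'gpt-4o-mini',
    options: { useResponsesAPI: true },
});

// Any truthy mcpServers value selects OpenAIAgentProvider; the entry's
// real shape is defined by the Agents provider, so this placeholder is
// hypothetical.
const agent = new OpenAIProvider({
    apiKey: process.env.OPENAI_API_KEY,
    model: 'gpt-4o-mini',
    mcpServers: [{ /* MCP server config */ }],
});

// chat() and stream() both forward to the selected provider; the request
// shape here is an assumed example of the unified interface.
const reply = await completions.chat({
    messages: [{ role: 'user', content: 'Hello' }],
});

Note that the mcpServers branch is checked first, so if both mcpServers and options.useResponsesAPI are set, the request routes through the Agents SDK rather than the Responses provider.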