@unified-llm/core
Unified LLM interface (in-memory).
JavaScript
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIProvider = void 0;
const openai_1 = __importDefault(require("openai"));
const base_provider_js_1 = __importDefault(require("../base-provider.js"));
const agent_provider_js_1 = require("./agent-provider.js");
const completion_provider_js_1 = require("./completion-provider.js");
const responses_provider_js_1 = require("./responses-provider.js");
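// OpenAIProvider picks an underlying implementation at construction time:
// the Agents SDK when MCP servers are configured, otherwise a direct
// Responses API or Chat Completions client.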
class OpenAIProvider extends base_provider_js_1.default {
    constructor(options) {
        var _a;
        super({ model: options.model, tools: options.tools });
        if (options.mcpServers) {
            // Build a per-provider OpenAI client to inject into the Agents SDK
            const client = new openai_1.default({ apiKey: options.apiKey, baseURL: options.baseURL });
            this.provider = new agent_provider_js_1.OpenAIAgentProvider({
                client,
                model: options.model,
                tools: options.tools,
                mcpServers: options.mcpServers,
                // Default to Responses API for Agents; can be extended to be configurable
                openaiApi: 'responses',
                logLevel: options.logLevel,
            });
        }
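        // No MCP servers: use a direct API client instead of the Agents SDK.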
        else {
            if ((_a = options.options) === null || _a === void 0 ? void 0 : _a.useResponsesAPI) {
                this.provider = new responses_provider_js_1.OpenAIResponsesProvider({
                    apiKey: options.apiKey,
                    model: options.model,
                    baseURL: options.baseURL,
                    tools: options.tools,
                    logLevel: options.logLevel,
                });
            }
            else {
                this.provider = new completion_provider_js_1.OpenAICompletionProvider({
                    apiKey: options.apiKey,
                    model: options.model,
                    baseURL: options.baseURL,
                    tools: options.tools,
                    logLevel: options.logLevel,
                });
            }
        }
    }
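    // Send a single (non-streaming) chat request via the selected provider.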
    async chat(request) {
        return this.provider.chat(request);
    }
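    // Stream the response incrementally from the selected provider.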
    async *stream(request) {
        yield* this.provider.stream(request);
    }
}
exports.OpenAIProvider = OpenAIProvider;
//# sourceMappingURL=provider.js.map
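
For orientation, a minimal usage sketch. The constructor options (apiKey, model, baseURL, tools, mcpServers, logLevel, and options.useResponsesAPI) come from the code above; the package export path, model id, and request shape are assumptions for illustration, not confirmed API.

// Minimal usage sketch; assumes OpenAIProvider is re-exported from the
// package root and that chat()/stream() accept a messages array (unverified).
const { OpenAIProvider } = require("@unified-llm/core");

async function main() {
    const provider = new OpenAIProvider({
        apiKey: process.env.OPENAI_API_KEY,
        model: "gpt-4o-mini", // hypothetical model id
        options: { useResponsesAPI: true }, // selects OpenAIResponsesProvider
    });
    // Request shape below is an assumed example, not taken from this file.
    const reply = await provider.chat({ messages: [{ role: "user", content: "Hello" }] });
    console.log(reply);

    // stream() is an async generator; chunk shape is provider-defined.
    for await (const chunk of provider.stream({ messages: [{ role: "user", content: "Hello" }] })) {
        console.log(chunk);
    }
}

main().catch(console.error);

Passing mcpServers instead would route through OpenAIAgentProvider, which builds its own OpenAI client and defaults to the Responses API, as seen in the constructor above.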