/*
 * jorel
 * Version:
 * A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
 * 107 lines (106 loc) • 4.24 kB
 * JavaScript
 */
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.TestProvider = void 0;
const providers_1 = require("../providers");
const shared_1 = require("../shared");
/**
 * Mock LLM provider for tests. Returns canned responses, optionally after a
 * simulated delay, and can be configured to fail for specific model names.
 */
class TestProvider {
  /**
   * @param {object} [config] - Optional overrides.
   * @param {string} [config.name] - Provider name (default "test-provider").
   * @param {string} [config.defaultResponse] - Canned non-streaming reply.
   * @param {string[]} [config.defaultStreamResponse] - Canned stream chunks.
   * @param {number} [config.simulateDelay] - Artificial delay in ms (default 0).
   * @param {string[]} [config.failOnModels] - Models that always throw.
   */
  constructor(config = {}) {
    this.name = config.name || "test-provider";
    this.defaultResponse = config.defaultResponse || "This is a test response";
    this.defaultStreamResponse = config.defaultStreamResponse || ["This ", "is ", "a ", "test ", "response"];
    this.simulateDelay = config.simulateDelay || 0;
    this.failOnModels = config.failOnModels || [];
  }
  /** Waits `simulateDelay` ms; resolves immediately when no delay is configured. */
  async delay() {
    if (!(this.simulateDelay > 0)) return;
    await new Promise((done) => setTimeout(done, this.simulateDelay));
  }
  /**
   * Produces a canned assistant response (or a canned tool call when tools
   * are supplied and toolChoice is not "none").
   * @throws {Error} when `model` is listed in `failOnModels`.
   */
  async generateResponse(model, messages, config = {}) {
    await this.delay();
    if (this.failOnModels.includes(model)) {
      throw new Error(`Model ${model} is configured to fail`);
    }
    // Both branches share identical metadata, so build it once up front.
    const meta = {
      model,
      provider: this.name,
      temperature: config.temperature ?? undefined,
      durationMs: this.simulateDelay,
      inputTokens: 10,
      outputTokens: 10,
    };
    const wantsToolCall = config.tools && config.toolChoice !== "none";
    const firstTool = wantsToolCall ? config.tools.tools[0] : undefined;
    if (firstTool) {
      // Fabricate a single tool call against the first available tool.
      const toolCall = {
        id: "test-tool-call",
        request: {
          id: "test-function-call",
          function: {
            name: firstTool.name,
            arguments: { test: "value" },
          },
        },
        approvalState: firstTool.requiresConfirmation ? "requiresApproval" : "noApprovalRequired",
        executionState: "pending",
        result: null,
        error: null,
      };
      return {
        ...(0, providers_1.generateAssistantMessage)(null, null, [toolCall]),
        meta,
      };
    }
    return {
      ...(0, providers_1.generateAssistantMessage)(this.defaultResponse, null),
      meta,
    };
  }
  /**
   * Streams the canned chunks one by one (delaying before each), then yields
   * a final "response" event containing the concatenated text.
   * @throws {Error} when `model` is listed in `failOnModels`.
   */
  async *generateResponseStream(model, messages, config = {}) {
    if (this.failOnModels.includes(model)) {
      throw new Error(`Model ${model} is configured to fail`);
    }
    for (const piece of this.defaultStreamResponse) {
      await this.delay();
      yield { type: "chunk", content: piece, chunkId: (0, shared_1.generateUniqueId)() };
    }
    const fullText = this.defaultStreamResponse.join("");
    yield {
      type: "response",
      role: "assistant",
      content: fullText,
      reasoningContent: null,
      meta: {
        model,
        provider: this.name,
        temperature: config.temperature ?? undefined,
        // One simulated delay per chunk.
        durationMs: this.simulateDelay * this.defaultStreamResponse.length,
        inputTokens: 10,
        outputTokens: 10,
      },
    };
  }
  /** Returns a fixed list of fake model identifiers. */
  async getAvailableModels() {
    await this.delay();
    return ["test-model-1", "test-model-2", "test-model-3"];
  }
  /**
   * Returns a deterministic 10-dimensional embedding derived from the input
   * text's length, so tests get stable vectors.
   * @throws {Error} when `model` is listed in `failOnModels`.
   */
  async createEmbedding(model, text) {
    await this.delay();
    if (this.failOnModels.includes(model)) {
      throw new Error(`Model ${model} is configured to fail`);
    }
    return Array.from({ length: 10 }, (_, i) => (text.length + i) / 100);
  }
}
exports.TestProvider = TestProvider;