// @langchain/core — Core LangChain.js abstractions and schemas
import { LLM } from "../../language_models/llms.js";
//#region src/utils/testing/llms.ts
/**
 * A fake LLM for unit tests: returns a fixed `response` if one was given,
 * otherwise echoes the prompt back. Set `thrownErrorString` to make every
 * call throw instead, for exercising error-handling paths.
 */
var FakeLLM = class extends LLM {
  response;
  thrownErrorString;
  constructor(fields) {
    super(fields);
    this.response = fields.response;
    this.thrownErrorString = fields.thrownErrorString;
  }
  _llmType() {
    return "fake";
  }
  async _call(prompt, _options, runManager) {
    if (this.thrownErrorString) throw new Error(this.thrownErrorString);
    // Echo the prompt when no canned response is configured.
    const response = this.response ?? prompt;
    await runManager?.handleLLMNewToken(response);
    return response;
  }
};
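// Usage sketch (an illustrative test-file example, not part of this module;
// `invoke` comes from the Runnable interface that LLM implements):
//
//   const llm = new FakeLLM({ response: "pong" });
//   await llm.invoke("ping"); // -> "pong"
//
//   const failing = new FakeLLM({ thrownErrorString: "boom" });
//   await failing.invoke("ping"); // -> throws Error("boom")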
/**
 * A fake streaming LLM for unit tests: consumes `responses` one per call
 * (falling back to echoing the input) and, when streamed, yields the text
 * character by character with a `sleep` delay between chunks.
 */
var FakeStreamingLLM = class extends LLM {
  sleep = 50;
  responses;
  thrownErrorString;
  constructor(fields) {
    super(fields);
    this.sleep = fields.sleep ?? this.sleep;
    this.responses = fields.responses;
    this.thrownErrorString = fields.thrownErrorString;
  }
  _llmType() {
    return "fake";
  }
  async _call(prompt) {
    if (this.thrownErrorString) throw new Error(this.thrownErrorString);
    // Dequeue the next canned response; fall back to echoing the prompt.
    const response = this.responses?.[0];
    this.responses = this.responses?.slice(1);
    return response ?? prompt;
  }
  async *_streamResponseChunks(input, _options, runManager) {
    if (this.thrownErrorString) throw new Error(this.thrownErrorString);
    const response = this.responses?.[0];
    this.responses = this.responses?.slice(1);
    // Emit one character at a time, pausing `sleep` ms before each chunk.
    for (const c of response ?? input) {
      await new Promise((resolve) => setTimeout(resolve, this.sleep));
      yield {
        text: c,
        generationInfo: {}
      };
      await runManager?.handleLLMNewToken(c);
    }
  }
};
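// Usage sketch (an illustrative example, not part of this module): `stream`
// from the Runnable interface drives `_streamResponseChunks`, so each string
// chunk arrives one character at a time, `sleep` ms apart:
//
//   const llm = new FakeStreamingLLM({ responses: ["Hi"], sleep: 10 });
//   for await (const chunk of await llm.stream("ignored")) {
//     process.stdout.write(chunk); // "H", then "i"
//   }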
//#endregion
export { FakeLLM, FakeStreamingLLM };
//# sourceMappingURL=llms.js.map