@mastra/core
Version:
Mastra is the TypeScript framework for building AI agents and assistants. It’s used by some of the largest companies in the world to build internal AI automation tooling and customer-facing agents.
140 lines (138 loc) • 4.13 kB
JavaScript
import { MastraLLMV1 } from '../chunk-EMPBDL5C.js';
import { simulateReadableStream } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';
/**
 * Builds a `MockLanguageModelV1` that echoes `mockText` from both the
 * generate and stream code paths, for testing LLM plumbing without a
 * real provider.
 *
 * @param {object} options
 * @param {string|undefined} options.objectGenerationMode - When `"json"`,
 *   `doGenerate` returns `JSON.stringify(mockText)` even if `mockText` is
 *   already a string (so string output is wrapped in quotes).
 * @param {*} options.mockText - String (or JSON-serializable value) the mock emits.
 * @param {(props: object) => void} [options.spyGenerate] - Invoked with the raw
 *   call props on every `doGenerate`.
 * @param {(props: object) => void} [options.spyStream] - Invoked with the raw
 *   call props on every `doStream`.
 * @returns {MockLanguageModelV1} The configured mock model.
 */
function createMockModel({
  objectGenerationMode,
  mockText,
  spyGenerate,
  spyStream
}) {
  // Serialize once up front: non-string payloads are emitted as JSON in
  // every mode, and both doGenerate and doStream need the same text.
  const asText = typeof mockText === "string" ? mockText : JSON.stringify(mockText);
  return new MockLanguageModelV1({
    defaultObjectGenerationMode: objectGenerationMode,
    doGenerate: async (props) => {
      spyGenerate?.(props);
      return {
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: "stop",
        usage: { promptTokens: 10, completionTokens: 20 },
        // "json" mode always stringifies, even when mockText is a plain string.
        text: objectGenerationMode === "json" ? JSON.stringify(mockText) : asText
      };
    },
    doStream: async (props) => {
      spyStream?.(props);
      // One text-delta per whitespace-separated word; every delta keeps a
      // trailing space, so the reassembled stream ends with an extra " ".
      const wordChunks = asText.split(" ").map((word) => ({
        type: "text-delta",
        textDelta: `${word} `
      }));
      return {
        stream: simulateReadableStream({
          chunks: [
            ...wordChunks,
            {
              type: "finish",
              finishReason: "stop",
              logprobs: void 0,
              usage: { completionTokens: 10, promptTokens: 3 }
            }
          ]
        }),
        rawCall: { rawPrompt: null, rawSettings: {} }
      };
    }
  });
}
/**
 * Test double for an LLM provider. Wraps the mock model produced by
 * `createMockModel` in the `MastraLLMV1` base class, and makes the
 * results of `stream`/`__streamObject` awaitable so tests can simply
 * `await provider.stream(...)` to drain the stream.
 */
var MockProvider = class extends MastraLLMV1 {
  /**
   * @param {object} options
   * @param {(props: object) => void} [options.spyGenerate] - Spy invoked on each generate call.
   * @param {(props: object) => void} [options.spyStream] - Spy invoked on each stream call.
   * @param {string|undefined} [options.objectGenerationMode] - Passed through to the mock model.
   * @param {*} [options.mockText="Hello, world!"] - Text the mock model emits.
   */
  constructor({
    spyGenerate,
    spyStream,
    objectGenerationMode,
    mockText = "Hello, world!"
  }) {
    // Delegate to createMockModel instead of duplicating its entire body
    // inline (the previous implementation repeated ~45 lines verbatim).
    super({
      model: createMockModel({ objectGenerationMode, mockText, spyGenerate, spyStream })
    });
  }
  // @ts-ignore
  stream(...args) {
    const result = super.stream(...args);
    return {
      ...result,
      // Make the result thenable: awaiting it pipes baseStream into a
      // discarding WritableStream, resolving once the stream is consumed.
      // @ts-ignore on await read the stream
      then: (onfulfilled, onrejected) => {
        return result.baseStream.pipeTo(new WritableStream()).then(onfulfilled, onrejected);
      }
    };
  }
  // @ts-ignore
  __streamObject(...args) {
    const result = super.__streamObject(...args);
    return {
      ...result,
      // Same thenable trick as stream(): await drains baseStream.
      // @ts-ignore on await read the stream
      then: (onfulfilled, onrejected) => {
        return result.baseStream.pipeTo(new WritableStream()).then(onfulfilled, onrejected);
      }
    };
  }
};
export { MockProvider, createMockModel };
//# sourceMappingURL=llm-mock.js.map