// @mastra/core
// Version: (unspecified)
// Mastra is a framework for building AI-powered applications and agents with a modern TypeScript stack.
// 178 lines (176 loc) • 5.3 kB — JavaScript
import { MastraLLMV1 } from '../chunk-6D2K2CAA.js';
import { simulateReadableStream } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';
import { MockLanguageModelV2, convertArrayToReadableStream } from 'ai-v5/test';
/**
 * Build a mock language model for tests.
 *
 * @param {Object} opts
 * @param {string} [opts.objectGenerationMode] - When "json", the reply is always the
 *   JSON-serialized form of `mockText` (v1 models also get it as their default mode).
 * @param {string|Object} [opts.mockText] - Canned reply; non-strings are JSON-serialized.
 * @param {Function} [opts.spyGenerate] - Invoked with the props of every doGenerate call.
 * @param {Function} [opts.spyStream] - Invoked with the props of every doStream call.
 * @param {"v1"|"v2"} [opts.version="v2"] - Which mock model generation to construct.
 * @returns {MockLanguageModelV1|MockLanguageModelV2} configured mock model
 */
function createMockModel({
  objectGenerationMode,
  mockText,
  spyGenerate,
  spyStream,
  version = "v2"
}) {
  // Non-string payloads are serialized; JSON mode re-serializes from the raw value.
  const serialized = typeof mockText === "string" ? mockText : JSON.stringify(mockText);
  const finalText = objectGenerationMode === "json" ? JSON.stringify(mockText) : serialized;

  if (version === "v1") {
    return new MockLanguageModelV1({
      defaultObjectGenerationMode: objectGenerationMode,
      doGenerate: async (props) => {
        if (spyGenerate) {
          spyGenerate(props);
        }
        return {
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: "stop",
          usage: { promptTokens: 10, completionTokens: 20 },
          text: finalText
        };
      },
      doStream: async (props) => {
        if (spyStream) {
          spyStream(props);
        }
        // One text-delta chunk per word, each carrying a trailing space.
        const wordChunks = finalText.split(" ").map((word) => ({
          type: "text-delta",
          textDelta: word + " "
        }));
        const finishChunk = {
          type: "finish",
          finishReason: "stop",
          logprobs: void 0,
          usage: { completionTokens: 10, promptTokens: 3 }
        };
        return {
          stream: simulateReadableStream({ chunks: wordChunks.concat(finishChunk) }),
          rawCall: { rawPrompt: null, rawSettings: {} }
        };
      }
    });
  }

  // Default path: AI SDK v2 mock with the structured stream-part protocol.
  return new MockLanguageModelV2({
    doGenerate: async (props) => {
      if (spyGenerate) {
        spyGenerate(props);
      }
      return {
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: "stop",
        usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 },
        content: [{ type: "text", text: finalText }],
        warnings: []
      };
    },
    doStream: async (props) => {
      if (spyStream) {
        spyStream(props);
      }
      const parts = [
        { type: "stream-start", warnings: [] },
        { type: "response-metadata", id: "id-0", modelId: "mock-model-id", timestamp: /* @__PURE__ */ new Date(0) },
        { type: "text-start", id: "1" },
        // The entire reply is emitted as a single delta.
        { type: "text-delta", id: "1", delta: finalText },
        { type: "text-end", id: "1" },
        {
          type: "finish",
          finishReason: "stop",
          usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 }
        }
      ];
      return {
        rawCall: { rawPrompt: null, rawSettings: {} },
        warnings: [],
        stream: convertArrayToReadableStream(parts)
      };
    }
  });
}
var MockProvider = class extends MastraLLMV1 {
  /**
   * Test double built on MastraLLMV1 that answers every request with canned text.
   *
   * @param {Object} opts
   * @param {Function} [opts.spyGenerate] - Receives the props of each doGenerate call.
   * @param {Function} [opts.spyStream] - Receives the props of each doStream call.
   * @param {string} [opts.objectGenerationMode] - When "json", replies with the
   *   JSON-serialized form of `mockText`.
   * @param {string|Object} [opts.mockText="Hello, world!"] - Canned reply payload.
   */
  constructor({
    spyGenerate,
    spyStream,
    objectGenerationMode,
    mockText = "Hello, world!"
  }) {
    const underlyingModel = new MockLanguageModelV1({
      defaultObjectGenerationMode: objectGenerationMode,
      doGenerate: async (props) => {
        if (spyGenerate) {
          spyGenerate(props);
        }
        const reply = objectGenerationMode === "json"
          ? JSON.stringify(mockText)
          : typeof mockText === "string" ? mockText : JSON.stringify(mockText);
        return {
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: "stop",
          usage: { promptTokens: 10, completionTokens: 20 },
          text: reply
        };
      },
      doStream: async (props) => {
        if (spyStream) {
          spyStream(props);
        }
        const body = typeof mockText === "string" ? mockText : JSON.stringify(mockText);
        // Emit one text-delta per word, each with a trailing space, then a finish chunk.
        const wordChunks = body.split(" ").map((word) => ({
          type: "text-delta",
          textDelta: word + " "
        }));
        const finishChunk = {
          type: "finish",
          finishReason: "stop",
          logprobs: void 0,
          usage: { completionTokens: 10, promptTokens: 3 }
        };
        return {
          stream: simulateReadableStream({ chunks: wordChunks.concat(finishChunk) }),
          rawCall: { rawPrompt: null, rawSettings: {} }
        };
      }
    });
    super({ model: underlyingModel });
  }
  // @ts-ignore
  stream(...args) {
    const streamResult = super.stream(...args);
    // Make the result awaitable: awaiting drains baseStream into a no-op sink.
    const drain = (onfulfilled, onrejected) =>
      streamResult.baseStream.pipeTo(new WritableStream()).then(onfulfilled, onrejected);
    return {
      ...streamResult,
      // @ts-ignore on await read the stream
      then: drain
    };
  }
  // @ts-ignore
  __streamObject(...args) {
    const objectResult = super.__streamObject(...args);
    // Same awaitable wrapper as stream(): await consumes the underlying stream.
    const drain = (onfulfilled, onrejected) =>
      objectResult.baseStream.pipeTo(new WritableStream()).then(onfulfilled, onrejected);
    return {
      ...objectResult,
      // @ts-ignore on await read the stream
      then: drain
    };
  }
};
export { MockProvider, createMockModel };
//# sourceMappingURL=llm-mock.js.map