@mastra/core
Version:
Mastra is the TypeScript framework for building AI agents and assistants. It’s used by some of the largest companies in the world to build internal AI automation tooling and customer-facing agents.
143 lines (140 loc) • 4.18 kB
JavaScript
;
var chunkBL44ZRZT_cjs = require('../chunk-BL44ZRZT.cjs');
var ai = require('ai');
var test = require('ai/test');
/**
 * Builds a `MockLanguageModelV1` whose generate/stream calls return canned text.
 *
 * @param {object} options
 * @param {string} [options.objectGenerationMode] - When `"json"`, `doGenerate`
 *   always returns `JSON.stringify(mockText)` (so even a plain string comes
 *   back quoted, as valid JSON).
 * @param {unknown} options.mockText - Payload to echo back; non-strings are
 *   serialized with `JSON.stringify`.
 * @param {Function} [options.spyGenerate] - Invoked with the raw call props on
 *   every `doGenerate`, for call inspection in tests.
 * @param {Function} [options.spyStream] - Invoked with the raw call props on
 *   every `doStream`.
 * @returns {test.MockLanguageModelV1} the configured mock model.
 */
function createMockModel({
  objectGenerationMode,
  mockText,
  spyGenerate,
  spyStream
}) {
  // Strings pass through untouched; everything else is serialized.
  const toPlainText = () =>
    typeof mockText === "string" ? mockText : JSON.stringify(mockText);

  return new test.MockLanguageModelV1({
    defaultObjectGenerationMode: objectGenerationMode,
    doGenerate: async (props) => {
      spyGenerate?.(props);
      // In "json" mode the payload is always stringified, even when it is
      // already a string (the result must parse as JSON).
      const text =
        objectGenerationMode === "json" ? JSON.stringify(mockText) : toPlainText();
      return {
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: "stop",
        usage: { promptTokens: 10, completionTokens: 20 },
        text
      };
    },
    doStream: async (props) => {
      spyStream?.(props);
      // Emit one text-delta chunk per whitespace-separated word; each delta
      // carries a trailing space (so the reassembled text ends with one, too —
      // this is the mock's established chunking contract).
      const deltas = toPlainText()
        .split(" ")
        .map((word) => ({
          type: "text-delta",
          textDelta: `${word} `
        }));
      return {
        stream: ai.simulateReadableStream({
          chunks: [
            ...deltas,
            {
              type: "finish",
              finishReason: "stop",
              logprobs: void 0,
              usage: { completionTokens: 10, promptTokens: 3 }
            }
          ]
        }),
        rawCall: { rawPrompt: null, rawSettings: {} }
      };
    }
  });
}
/**
 * Mock LLM provider for tests: wraps a canned `MockLanguageModelV1` in the
 * `MastraLLMV1` interface and makes `stream`/`__streamObject` results awaitable
 * (awaiting them drains the underlying stream to completion).
 */
var MockProvider = class extends chunkBL44ZRZT_cjs.MastraLLMV1 {
  /**
   * @param {object} options
   * @param {Function} [options.spyGenerate] - Observes every `doGenerate` call.
   * @param {Function} [options.spyStream] - Observes every `doStream` call.
   * @param {string} [options.objectGenerationMode] - Passed through; `"json"`
   *   forces JSON-stringified generate output.
   * @param {unknown} [options.mockText="Hello, world!"] - Canned payload.
   */
  constructor({
    spyGenerate,
    spyStream,
    objectGenerationMode,
    mockText = "Hello, world!"
  }) {
    // Delegate to createMockModel instead of duplicating its ~45-line mock
    // setup inline (the previous copy only differed by the mockText default).
    super({
      model: createMockModel({ objectGenerationMode, mockText, spyGenerate, spyStream })
    });
  }
  // @ts-ignore
  stream(...args) {
    const result = super.stream(...args);
    return {
      ...result,
      // Thenable: `await provider.stream(...)` consumes baseStream fully by
      // piping it into a throwaway WritableStream, then settles.
      // @ts-ignore on await read the stream
      then: (onfulfilled, onrejected) => {
        return result.baseStream.pipeTo(new WritableStream()).then(onfulfilled, onrejected);
      }
    };
  }
  // @ts-ignore
  __streamObject(...args) {
    const result = super.__streamObject(...args);
    return {
      ...result,
      // Same awaitable wrapper as stream(): awaiting drains the base stream.
      // @ts-ignore on await read the stream
      then: (onfulfilled, onrejected) => {
        return result.baseStream.pipeTo(new WritableStream()).then(onfulfilled, onrejected);
      }
    };
  }
};
// Public CommonJS API of this mock module.
exports.MockProvider = MockProvider;
exports.createMockModel = createMockModel;
//# sourceMappingURL=llm-mock.cjs.map