jorel
Version:
A unified wrapper for working with LLMs from multiple providers, including streams, images, documents & automatic tool use.
133 lines (132 loc) • 5.29 kB
JavaScript
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.GroqProviderNative = void 0;
const groq_sdk_1 = require("groq-sdk");
const __1 = require("..");
const shared_1 = require("../../shared");
const tools_1 = require("../../tools");
const convert_inputs_1 = require("./convert-inputs");
const convert_llm_message_1 = require("./convert-llm-message");
/**
* Provides access to Groq and other compatible services
* @deprecated: use GroqProvider instead
*/
/**
 * Provides access to Groq and other compatible services.
 * @deprecated use GroqProvider instead
 */
class GroqProviderNative {
    /**
     * @param {object} [options]
     * @param {string} [options.apiKey] - API key; falls back to the GROQ_API_KEY environment variable.
     * @param {string} [options.apiUrl] - Optional base URL override (for Groq-compatible services).
     * @param {string} [options.name] - Provider name reported in response metadata; defaults to "groq".
     */
    constructor({ apiKey, apiUrl, name } = {}) {
        this.name = name || "groq";
        this.client = new groq_sdk_1.default({
            // Fix: env var names are case-sensitive; the Groq SDK documents GROQ_API_KEY.
            // The original read the never-set "Groq_API_KEY", so the fallback could not work.
            apiKey: apiKey ?? process.env.GROQ_API_KEY,
            baseURL: apiUrl,
        });
    }
    /**
     * Generates a complete (non-streaming) assistant response, including any tool calls.
     * @param {string} model - Model id to use.
     * @param {Array} messages - Conversation messages (converted to Groq's wire format internally).
     * @param {object} [config] - Generation settings: temperature, maxTokens, json, tools, toolChoice.
     * @returns {Promise<object>} Assistant message spread together with a `meta` object
     *   (model, provider, temperature, durationMs, inputTokens, outputTokens).
     */
    async generateResponse(model, messages, config = {}) {
        const start = Date.now();
        const temperature = config.temperature ?? undefined;
        const response = await this.client.chat.completions.create({
            model,
            messages: await (0, convert_llm_message_1.convertLlmMessagesToGroqMessages)(messages),
            temperature,
            max_tokens: config.maxTokens || undefined,
            response_format: config.json ? { type: "json_object" } : { type: "text" },
            tools: config.tools?.asLlmFunctions,
            parallel_tool_calls: config.tools && config.tools.hasTools ? config.tools.allowParallelCalls : undefined,
            tool_choice: (0, convert_inputs_1.toolChoiceToGroq)(config.toolChoice),
        });
        const durationMs = Date.now() - start;
        const inputTokens = response.usage?.prompt_tokens;
        const outputTokens = response.usage?.completion_tokens;
        const message = response.choices[0].message;
        // Map provider tool calls into the internal representation, flagging tools
        // that were registered as requiring user confirmation before execution.
        const toolCalls = message.tool_calls?.map((call) => ({
            id: (0, shared_1.generateUniqueId)(),
            request: {
                id: call.id,
                function: {
                    name: call.function.name,
                    arguments: tools_1.LlmToolKit.deserialize(call.function.arguments),
                },
            },
            approvalState: config.tools?.getTool(call.function.name)?.requiresConfirmation
                ? "requiresApproval"
                : "noApprovalRequired",
            executionState: "pending",
            result: null,
            error: null,
        }));
        const provider = this.name;
        return {
            ...(0, __1.generateAssistantMessage)(message.content, message.reasoning ?? null, toolCalls),
            meta: {
                model,
                provider,
                temperature,
                durationMs,
                inputTokens,
                outputTokens,
            },
        };
    }
    /**
     * Generates a streaming assistant response. Yields "chunk" / "reasoningChunk"
     * entries as deltas arrive, then a final "response" entry with the accumulated
     * text and metadata. Token counts are not reported in streaming mode.
     * @param {string} model - Model id to use.
     * @param {Array} messages - Conversation messages (converted to Groq's wire format internally).
     * @param {object} [config] - Generation settings: temperature, maxTokens, json.
     */
    async *generateResponseStream(model, messages, config = {}) {
        const start = Date.now();
        const temperature = config.temperature ?? undefined;
        const response = await this.client.chat.completions.create({
            model,
            messages: await (0, convert_llm_message_1.convertLlmMessagesToGroqMessages)(messages),
            temperature,
            response_format: config.json ? { type: "json_object" } : { type: "text" },
            max_tokens: config.maxTokens || undefined,
            stream: true,
        });
        let content = "";
        let reasoningContent = "";
        for await (const chunk of response) {
            // Resolve the delta once per chunk (the original looked it up twice).
            const delta = (0, shared_1.firstEntry)(chunk.choices)?.delta;
            const contentChunk = delta?.content;
            if (contentChunk) {
                content += contentChunk;
                yield { type: "chunk", content: contentChunk, chunkId: (0, shared_1.generateUniqueId)() };
            }
            const reasoningChunk = delta?.reasoning;
            if (reasoningChunk) {
                reasoningContent += reasoningChunk;
                yield { type: "reasoningChunk", content: reasoningChunk, chunkId: (0, shared_1.generateUniqueId)() };
            }
        }
        const durationMs = Date.now() - start;
        // Usage is not surfaced on stream chunks here, so token counts stay undefined.
        const inputTokens = undefined;
        const outputTokens = undefined;
        const provider = this.name;
        yield {
            type: "response",
            role: "assistant",
            content,
            reasoningContent,
            meta: {
                model,
                provider,
                temperature,
                durationMs,
                inputTokens,
                outputTokens,
            },
        };
    }
    /**
     * Lists the model ids available from the service.
     * @returns {Promise<string[]>}
     */
    async getAvailableModels() {
        const models = await this.client.models.list();
        return models.data.map((model) => model.id);
    }
    /**
     * Creates an embedding vector for the given text.
     * @param {string} model - Embedding model id.
     * @param {string} text - Text to embed.
     * @returns {Promise<number[]>}
     * @throws {Error} when the response has no data or the embedding is not an array.
     */
    async createEmbedding(model, text) {
        const response = await this.client.embeddings.create({
            model,
            input: text,
        });
        // Fix: the original tested `!response.data` twice; one nullish-safe check suffices.
        if (!response?.data || response.data.length === 0) {
            throw new Error("Failed to create embedding");
        }
        if (!Array.isArray(response.data[0].embedding)) {
            throw new Error("Received unsupported embedding format");
        }
        return response.data[0].embedding;
    }
}
exports.GroqProviderNative = GroqProviderNative;