import { createAnalytics } from "./analytics.js";
import { logger } from "../utils/logger.js";
/**
* Base implementation for collecting analytics from Vercel AI SDK stream results
*/
export class BaseStreamAnalyticsCollector {
    /**
     * Collect token usage from a stream result. Resolves the SDK's `usage`
     * promise and normalizes it to input/output/total token counts, falling
     * back to zeros when usage data is unavailable.
     */
async collectUsage(result) {
try {
const usage = await result.usage;
if (!usage) {
logger.debug("No usage data available from stream result");
return {
inputTokens: 0,
outputTokens: 0,
totalTokens: 0,
};
}
return {
inputTokens: usage.promptTokens || 0,
outputTokens: usage.completionTokens || 0,
totalTokens: usage.totalTokens ||
(usage.promptTokens || 0) + (usage.completionTokens || 0),
};
        } catch (error) {
logger.warn("Failed to collect usage from stream result", { error });
return {
inputTokens: 0,
outputTokens: 0,
totalTokens: 0,
};
}
}
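    // Example resolved value (shape only; the numbers are illustrative):
    //   { inputTokens: 812, outputTokens: 164, totalTokens: 976 }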
    /**
     * Collect response metadata (id, model, timestamp, and finish reason)
     * from a stream result, falling back to a timestamp-only record on failure.
     */
async collectMetadata(result) {
try {
const [response, finishReason] = await Promise.all([
result.response,
result.finishReason,
]);
return {
id: response?.id,
model: response?.model,
timestamp: response?.timestamp instanceof Date
? response.timestamp.getTime()
: response?.timestamp || Date.now(),
finishReason: finishReason,
};
        } catch (error) {
logger.warn("Failed to collect metadata from stream result", { error });
            // Wrap in Promise.resolve so a non-promise `finishReason` cannot
            // throw inside this catch block; swallow a rejection so the
            // fallback metadata can still be returned.
            const finishReason = await Promise.resolve(result.finishReason).catch(() => "error");
return {
timestamp: Date.now(),
finishReason: finishReason,
};
}
}
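    // Example resolved value (shape only; the id and model are illustrative):
    //   { id: "resp_abc123", model: "gpt-4o", timestamp: 1718000000000, finishReason: "stop" }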
    /**
     * Create comprehensive analytics from a stream result by resolving usage,
     * metadata, final text, and tool activity in parallel.
     */
async createAnalytics(provider, model, result, responseTime, metadata) {
try {
// Collect analytics data in parallel
const [usage, responseMetadata] = await Promise.all([
this.collectUsage(result),
this.collectMetadata(result),
]);
            // Resolve the final text, finish reason, and any tool activity
const [content, finishReason, toolResults, toolCalls] = await Promise.all([
result.text,
result.finishReason,
result.toolResults || Promise.resolve([]),
result.toolCalls || Promise.resolve([]),
]);
// Create comprehensive analytics
return createAnalytics(provider, model, {
usage,
content,
response: responseMetadata,
finishReason: finishReason,
toolResults: toolResults,
toolCalls: toolCalls,
}, responseTime, {
...metadata,
streamingMode: true,
responseId: responseMetadata.id,
finishReason: finishReason,
});
        } catch (error) {
logger.error("Failed to create analytics from stream result", {
provider,
model,
error: error instanceof Error ? error.message : String(error),
});
// Return minimal analytics on error
            return createAnalytics(provider, model, {
                usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
            }, responseTime, {
...metadata,
streamingMode: true,
analyticsError: true,
});
}
}
/**
* Clean up resources and force garbage collection if needed
*/
cleanup() {
// Only force garbage collection if memory usage exceeds 500 MB
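        // Note: global.gc is only defined when Node.js is started with --expose-gc.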
const heapUsed = process.memoryUsage().heapUsed;
const GC_THRESHOLD = 500 * 1024 * 1024; // 500 MB
if (typeof global !== "undefined" && global.gc && heapUsed > GC_THRESHOLD) {
global.gc();
}
}
}
/**
* Global instance of stream analytics collector
*/
export const streamAnalyticsCollector = new BaseStreamAnalyticsCollector();
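// Example usage (an illustrative sketch, not part of this module; the
// provider/model strings, `streamResult`, and `responseTimeMs` are assumed
// to come from a Vercel AI SDK streamText() call elsewhere in the app):
//
//   const analytics = await streamAnalyticsCollector.createAnalytics(
//       "openai",             // provider name (placeholder)
//       "gpt-4o",             // model identifier (placeholder)
//       streamResult,         // streamText() result object
//       responseTimeMs,       // measured response latency
//       { requestId: "..." }, // optional caller-supplied metadata
//   );
//   streamAnalyticsCollector.cleanup();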