@just-every/ensemble
LLM provider abstraction layer with unified streaming interface
64 lines • 1.88 kB
JavaScript
// Registered logger sinks. Every log_llm_* call below fans out to each
// entry in order; errors from one logger never block the others.
let globalLoggers = [];
/**
 * Register a logger, or clear all loggers.
 *
 * Kept for backward compatibility with the single-logger API: a non-null
 * logger is ADDED to the list (deduplicated by identity), it does not
 * replace existing loggers. Use addEnsembleLogger for the explicit
 * multi-logger API.
 *
 * @param {object|null} logger - Logger to register, or null/undefined to
 *   remove all registered loggers.
 */
export function setEnsembleLogger(logger) {
    // == null matches both null and undefined. Previously undefined fell
    // through and was pushed into the list, making every subsequent
    // log_llm_* call throw (and log a console error) on the bogus entry.
    if (logger == null) {
        globalLoggers = [];
        return;
    }
    if (!globalLoggers.includes(logger)) {
        globalLoggers.push(logger);
    }
}
/**
 * Register an additional logger.
 *
 * Identity-deduplicated: registering the same logger object twice is a
 * no-op, so it will still receive each event exactly once.
 *
 * @param {object} logger - Logger to register.
 */
export function addEnsembleLogger(logger) {
    if (globalLoggers.includes(logger)) {
        return;
    }
    globalLoggers.push(logger);
}
/**
 * Unregister a previously-registered logger.
 *
 * Silently does nothing if the logger was never registered.
 *
 * @param {object} logger - Logger to remove (matched by identity).
 */
export function removeEnsembleLogger(logger) {
    const position = globalLoggers.indexOf(logger);
    if (position === -1) {
        return;
    }
    globalLoggers.splice(position, 1);
}
/**
 * Get the first registered logger (legacy single-logger accessor).
 *
 * @returns {object|null} The first logger, or null when none are registered.
 */
export function getEnsembleLogger() {
    const [first] = globalLoggers;
    return first || null;
}
/**
 * Get every registered logger.
 *
 * @returns {object[]} A shallow copy of the logger list; mutating it does
 *   not affect the registered loggers.
 */
export function getAllEnsembleLoggers() {
    return globalLoggers.slice();
}
/**
 * Fan an LLM request event out to every registered logger.
 *
 * Each logger is invoked inside its own try/catch, so one faulty logger
 * cannot prevent the others from seeing the event.
 *
 * @param {string} agentId - Identifier of the agent issuing the request.
 * @param {string} providerName - LLM provider name.
 * @param {string} model - Model identifier.
 * @param {*} requestData - Raw request payload passed through to loggers.
 * @param {*} [timestamp] - Optional event timestamp passed through to loggers.
 * @returns {string} The first truthy request id returned by a logger, or ''.
 */
export function log_llm_request(agentId, providerName, model, requestData, timestamp) {
    let firstRequestId = '';
    for (const logger of globalLoggers) {
        try {
            const id = logger.log_llm_request(agentId, providerName, model, requestData, timestamp);
            if (id && !firstRequestId) {
                firstRequestId = id;
            }
        } catch (error) {
            console.error('Error in logger.log_llm_request:', error);
        }
    }
    return firstRequestId;
}
/**
 * Fan an LLM response event out to every registered logger.
 *
 * Errors thrown by individual loggers are logged and swallowed so the
 * remaining loggers still receive the event.
 *
 * @param {string} requestId - Id correlating this response to its request.
 * @param {*} responseData - Raw response payload passed through to loggers.
 * @param {*} [timestamp] - Optional event timestamp passed through to loggers.
 */
export function log_llm_response(requestId, responseData, timestamp) {
    globalLoggers.forEach((logger) => {
        try {
            logger.log_llm_response(requestId, responseData, timestamp);
        } catch (error) {
            console.error('Error in logger.log_llm_response:', error);
        }
    });
}
/**
 * Fan an LLM error event out to every registered logger.
 *
 * Errors thrown by individual loggers are logged and swallowed so the
 * remaining loggers still receive the event.
 *
 * @param {string} requestId - Id correlating this error to its request.
 * @param {*} errorData - Raw error payload passed through to loggers.
 * @param {*} [timestamp] - Optional event timestamp passed through to loggers.
 */
export function log_llm_error(requestId, errorData, timestamp) {
    globalLoggers.forEach((logger) => {
        try {
            logger.log_llm_error(requestId, errorData, timestamp);
        } catch (error) {
            console.error('Error in logger.log_llm_error:', error);
        }
    });
}
//# sourceMappingURL=llm_logger.js.map