// @codai/romai-core — Core intelligence engine for the ROMAI ecosystem
// (Romanian AI Central Intelligence). Version: unspecified.
// Bundled JavaScript output: 356 lines (348 loc), 12.4 kB.
import { OpenAI } from 'openai';
import { z } from 'zod';
import winston from 'winston';
import { randomUUID } from 'crypto';
// src/index.ts
// Zod schema for the full RomaiCore configuration. Every section except
// api.auth.jwtSecret has defaults, so a sparse config object still parses.
var configSchema = z.object({
// Azure OpenAI connection settings. Both apiKey and endpoint must be present
// for AI calls; blank/whitespace-only strings are normalized to undefined so
// the engine can fall back to MCP-only mode.
azure: z.object({
apiKey: z.string().transform((val) => val?.trim() || void 0).optional(),
// Trimmed first, then (when present) required to be a well-formed URL.
endpoint: z.string().transform((val) => val?.trim() || void 0).optional().refine(
(val) => !val || z.string().url().safeParse(val).success,
"Invalid URL format"
),
apiVersion: z.string().default("2024-12-01-preview"),
deploymentName: z.string().default("gpt-4")
}),
// Memory backend selection; provider-specific options go in `config`.
memory: z.object({
provider: z.enum(["memorai", "local", "redis"]).default("memorai"),
config: z.record(z.unknown()).default({})
}),
// MCP (Model Context Protocol) server identity and listen port.
mcp: z.object({
port: z.number().default(3001),
name: z.string().default("romai-mcp"),
version: z.string().default("0.1.0"),
description: z.string().default("ROMAI MCP Server")
}),
// HTTP API settings: CORS, rate limiting, and JWT auth.
api: z.object({
port: z.number().default(3e3),
cors: z.object({
origin: z.union([z.string(), z.array(z.string())]).default("*"),
credentials: z.boolean().default(true)
}).default({}),
rateLimit: z.object({
windowMs: z.number().default(15 * 60 * 1e3),
// 15 minutes
max: z.number().default(100)
// limit each IP to 100 requests per windowMs
}).default({}),
// No default for jwtSecret here: a secret of at least 32 chars is required.
auth: z.object({
jwtSecret: z.string().min(32),
expiresIn: z.string().default("24h")
})
})
});
// Shared winston logger for the whole romai-core module.
// Verbosity is "debug" in development and "info" everywhere else; records are
// JSON with timestamps and full error stack traces.
const structuredLogFormat = winston.format.combine(
  winston.format.timestamp(),
  winston.format.errors({ stack: true }),
  winston.format.json()
);
var logger = winston.createLogger({
  level: process.env.NODE_ENV === "development" ? "debug" : "info",
  format: structuredLogFormat,
  defaultMeta: { service: "romai-core" },
  transports: [
    // Errors get a dedicated file in addition to the combined log.
    new winston.transports.File({ filename: "logs/error.log", level: "error" }),
    new winston.transports.File({ filename: "logs/combined.log" })
  ]
});
// Mirror log output to a colorized, human-readable console outside production.
if (process.env.NODE_ENV !== "production") {
  const consoleTransport = new winston.transports.Console({
    format: winston.format.combine(winston.format.colorize(), winston.format.simple())
  });
  logger.add(consoleTransport);
}
/**
 * Central ROMAI intelligence engine.
 *
 * Wraps an Azure OpenAI deployment behind helpers for chat completion
 * (generateResponse), higher-level "intelligence" queries with a
 * Romanian/English system prompt (processIntelligenceRequest), and health
 * checks. When azure.apiKey/endpoint are absent the instance is created in
 * MCP-only mode: construction succeeds, but AI-generating methods throw.
 */
var RomaiCore = class {
  client;  // OpenAI SDK client, or null in MCP-only mode
  config;  // configuration normalized by validateConfig()
  logger;  // shared module-level winston logger

  /**
   * @param config raw configuration object, validated against configSchema
   * @throws Error when configuration validation fails
   */
  constructor(config) {
    this.logger = logger;
    this.config = this.validateConfig(config);
    if (this.config.azure.apiKey && this.config.azure.endpoint) {
      // Azure OpenAI is addressed per deployment and authenticates with an
      // `api-key` header plus an `api-version` query parameter.
      this.client = new OpenAI({
        apiKey: this.config.azure.apiKey,
        baseURL: `${this.config.azure.endpoint}/openai/deployments/${this.config.azure.deploymentName}`,
        defaultQuery: { "api-version": this.config.azure.apiVersion },
        defaultHeaders: {
          "api-key": this.config.azure.apiKey
        }
      });
      this.logger.info("ROMAI Core initialized successfully with Azure OpenAI", {
        endpoint: this.config.azure.endpoint,
        model: this.config.azure.deploymentName
      });
    } else {
      this.client = null;
      this.logger.info("ROMAI Core initialized in MCP-only mode (no Azure OpenAI)");
    }
  }

  /**
   * Parse and normalize a raw config via configSchema.
   * Empty azure credentials become undefined so the constructor's
   * "is Azure configured" check is a plain truthiness test.
   * @throws Error wrapping the zod validation failure
   */
  validateConfig(config) {
    try {
      const parsed = configSchema.parse(config);
      return {
        azure: {
          apiKey: parsed.azure.apiKey || void 0,
          endpoint: parsed.azure.endpoint || void 0,
          apiVersion: parsed.azure.apiVersion,
          deploymentName: parsed.azure.deploymentName
        },
        memory: parsed.memory,
        mcp: parsed.mcp,
        api: parsed.api
      };
    } catch (error) {
      this.logger.error("Invalid configuration", { error });
      throw new Error(`Configuration validation failed: ${error}`);
    }
  }

  /**
   * Run a chat completion against the configured deployment.
   * @param request { messages, model?, temperature?, maxTokens? }
   * @returns { message, usage?, model, finishReason? }
   * @throws Error in MCP-only mode, or when the API returns no content
   */
  async generateResponse(request) {
    if (!this.client) {
      throw new Error("Azure OpenAI not configured. Cannot generate AI responses in MCP-only mode.");
    }
    try {
      const model = request.model || this.config.azure.deploymentName;
      this.logger.debug("Generating AI response", {
        messageCount: request.messages.length,
        model
      });
      const response = await this.client.chat.completions.create({
        model: model || "gpt-4",
        messages: request.messages.map((msg) => ({
          role: msg.role,
          content: msg.content
        })),
        // BUGFIX: use ?? instead of || so an explicit temperature of 0
        // (deterministic sampling) is honored rather than replaced by 0.7.
        temperature: request.temperature ?? 0.7,
        max_tokens: request.maxTokens ?? 2e3
      });
      const choice = response.choices[0];
      if (!choice?.message?.content) {
        throw new Error("No response content generated");
      }
      const aiResponse = {
        message: {
          id: randomUUID(),
          role: "assistant",
          content: choice.message.content,
          timestamp: new Date(),
          metadata: {
            model,
            finishReason: choice.finish_reason
          }
        },
        // `usage` is optional on the wire; omit it rather than fabricate.
        usage: response.usage ? {
          promptTokens: response.usage.prompt_tokens,
          completionTokens: response.usage.completion_tokens,
          totalTokens: response.usage.total_tokens
        } : void 0,
        model,
        finishReason: choice.finish_reason || void 0
      };
      this.logger.info("AI response generated successfully", {
        responseLength: choice.message.content.length,
        tokensUsed: response.usage?.total_tokens || 0
      });
      return aiResponse;
    } catch (error) {
      this.logger.error("Failed to generate AI response", { error });
      throw error;
    }
  }

  /**
   * Answer a domain-scoped query: builds a language-specific system prompt,
   * optionally injects caller-supplied context as a second system message,
   * and wraps the AI answer with a heuristic confidence score.
   * @param request { query, language?, domain?, context? }
   * @returns { response, confidence, sources, relatedTopics, suggestions }
   */
  async processIntelligenceRequest(request) {
    try {
      this.logger.debug("Processing intelligence request", {
        language: request.language,
        domain: request.domain,
        queryLength: request.query.length
      });
      const systemPrompt = this.buildSystemPrompt(request);
      const messages = [
        {
          id: randomUUID(),
          role: "system",
          content: systemPrompt,
          timestamp: new Date()
        },
        {
          id: randomUUID(),
          role: "user",
          content: request.query,
          timestamp: new Date()
        }
      ];
      if (request.context) {
        // Insert context between the system prompt and the user query.
        messages.splice(1, 0, {
          id: randomUUID(),
          role: "system",
          content: `Context: ${request.context}`,
          timestamp: new Date()
        });
      }
      const aiResponse = await this.generateResponse({
        messages,
        temperature: 0.7,
        maxTokens: 2e3
      });
      const intelligenceResponse = {
        response: aiResponse.message.content,
        confidence: this.calculateConfidence(aiResponse),
        sources: [],
        // TODO: Implement source extraction
        relatedTopics: [],
        // TODO: Implement topic extraction
        suggestions: []
        // TODO: Implement suggestion generation
      };
      this.logger.info("Intelligence request processed successfully", {
        confidence: intelligenceResponse.confidence,
        responseLength: intelligenceResponse.response.length
      });
      return intelligenceResponse;
    } catch (error) {
      this.logger.error("Failed to process intelligence request", { error });
      throw error;
    }
  }

  /**
   * Build the system prompt; Romanian by default ("ro"), English otherwise.
   * The domain string is interpolated verbatim into the prompt.
   */
  buildSystemPrompt(request) {
    const language = request.language || "ro";
    const domain = request.domain || "general";
    let prompt = "";
    if (language === "ro") {
      prompt = `E\u0219ti ROMAI, sistemul de inteligen\u021B\u0103 artificial\u0103 rom\xE2nesc central al ecosistemului CodAI.
E\u0219ti un expert \xEEn toate domeniile \u0219i po\u021Bi rezolva orice problem\u0103 cu precizie \u0219i creativitate.
Caracteristici importante:
- R\u0103spunzi \xEEntotdeauna \xEEn rom\xE2n\u0103, cu un stil natural \u0219i conversa\u021Bional
- E\u0219ti extrem de precis \u0219i detaliat \xEEn explica\u021Bii
- Oferi solu\u021Bii practice \u0219i implementabile
- Ai o cunoa\u0219tere profund\u0103 a culturii \u0219i contextului rom\xE2nesc
- E\u0219ti capabil s\u0103 lucrezi \xEEn orice domeniu: tehnologie, business, educa\u021Bie, \u0219tiin\u021B\u0103, art\u0103
Domeniul curent: ${domain}
Instruc\u021Biuni:
1. Analizeaz\u0103 cererea cu aten\u021Bie
2. Ofer\u0103 un r\u0103spuns complet \u0219i structurat
3. Include exemple concrete c\xE2nd este relevant
4. Sugereaz\u0103 pa\u0219i urm\u0103tori dac\u0103 este cazul
5. Men\u021Bine un ton profesional dar prietenos`;
    } else {
      prompt = `You are ROMAI, the central Romanian AI intelligence system of the CodAI ecosystem.
You are an expert in all domains and can solve any problem with precision and creativity.
Important characteristics:
- You provide accurate and detailed explanations
- You offer practical and implementable solutions
- You have deep knowledge across all domains
- You can work in any field: technology, business, education, science, arts
Current domain: ${domain}
Instructions:
1. Analyze the request carefully
2. Provide a complete and structured response
3. Include concrete examples when relevant
4. Suggest next steps if applicable
5. Maintain a professional but friendly tone`;
    }
    return prompt;
  }

  /**
   * Heuristic confidence in [0, 1]: base 0.8, +0.1 for long (>500 chars)
   * responses, -0.2 for short (<100), +0.1 for a clean "stop" finish,
   * -0.1 when the response was truncated by the token limit.
   */
  calculateConfidence(response) {
    let confidence = 0.8;
    const responseLength = response.message.content.length;
    if (responseLength > 500) confidence += 0.1;
    if (responseLength < 100) confidence -= 0.2;
    if (response.finishReason === "stop") confidence += 0.1;
    if (response.finishReason === "length") confidence -= 0.1;
    return Math.max(0, Math.min(1, confidence));
  }

  /**
   * Connectivity probe. MCP-only mode is reported healthy (Azure simply
   * "not_configured"); otherwise a tiny completion request is issued and any
   * failure is mapped to an "unhealthy" result instead of a thrown error.
   */
  async healthCheck() {
    try {
      if (!this.client) {
        return {
          status: "healthy",
          timestamp: new Date(),
          details: {
            azure: "not_configured",
            mode: "mcp_only"
          }
        };
      }
      // Minimal round-trip to verify credentials, endpoint, and deployment.
      await this.client.chat.completions.create({
        model: this.config.azure.deploymentName || "gpt-4",
        messages: [{ role: "user", content: "Test connectivity" }],
        max_tokens: 10
      });
      return {
        status: "healthy",
        timestamp: new Date(),
        details: {
          azure: "connected",
          model: this.config.azure.deploymentName,
          endpoint: this.config.azure.endpoint
        }
      };
    } catch (error) {
      this.logger.error("Health check failed", { error });
      return {
        status: "unhealthy",
        timestamp: new Date(),
        details: {
          azure: "disconnected",
          error: error instanceof Error ? error.message : "Unknown error"
        }
      };
    }
  }

  /** Shallow copy of the active configuration (nested objects are shared). */
  getConfig() {
    return { ...this.config };
  }

  /** The shared winston logger used by this instance. */
  getLogger() {
    return this.logger;
  }
};
/**
 * Factory helper equivalent to `new RomaiCore(config)`.
 * @param config raw configuration (see configSchema / loadConfigFromEnv)
 * @returns a fully initialized RomaiCore instance
 */
function createRomaiCore(config) {
  const core = new RomaiCore(config);
  return core;
}
function loadConfigFromEnv() {
const config = {
azure: {
apiKey: process.env.AZURE_OPENAI_API_KEY?.replace(/\s+/g, "") || void 0,
endpoint: process.env.AZURE_OPENAI_ENDPOINT?.trim() || void 0,
apiVersion: process.env.AZURE_OPENAI_API_VERSION || "2024-12-01-preview",
deploymentName: process.env.AZURE_OPENAI_DEPLOYMENT_NAME || "gpt-4"
},
memory: {
provider: process.env.ROMAI_MEMORY_PROVIDER || "memorai",
config: {}
},
mcp: {
port: parseInt(process.env.ROMAI_MCP_PORT || "3001"),
name: process.env.ROMAI_MCP_NAME || "romai-mcp",
version: process.env.ROMAI_MCP_VERSION || "0.1.0",
description: process.env.ROMAI_MCP_DESCRIPTION || "ROMAI MCP Server"
},
api: {
port: parseInt(process.env.ROMAI_API_PORT || "3000"),
cors: {
origin: process.env.ROMAI_CORS_ORIGIN || "*",
credentials: process.env.ROMAI_CORS_CREDENTIALS === "true"
},
rateLimit: {
windowMs: parseInt(process.env.ROMAI_RATE_LIMIT_WINDOW || "900000"),
// 15 minutes
max: parseInt(process.env.ROMAI_RATE_LIMIT_MAX || "100")
},
auth: {
jwtSecret: process.env.ROMAI_JWT_SECRET || "romai-secret-key-change-in-production",
expiresIn: process.env.ROMAI_JWT_EXPIRES_IN || "24h"
}
}
};
return config;
}
export { RomaiCore, createRomaiCore, loadConfigFromEnv };
//# sourceMappingURL=index.mjs.map