@meshcore/cli
Official CLI for managing AI agents in MeshCore.ai with LLM-powered metadata extraction
138 lines (131 loc) • 5.48 kB
JavaScript
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.LLMService = void 0;
const openai_1 = __importDefault(require("openai"));
const config_1 = require("./config");
const agent_1 = require("../types/agent");
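// Singleton service that wraps the OpenAI client for LLM-backed metadata
// extraction and description generation used by the MeshCore CLI.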
class LLMService {
    constructor() {
        this.config = config_1.ConfigService.getInstance();
        this.initializeLLM();
    }
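    // Returns the shared LLMService instance, creating it on first use.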
    static getInstance() {
        if (!LLMService.instance) {
            LLMService.instance = new LLMService();
        }
        return LLMService.instance;
    }
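    // Creates the OpenAI client when the configured provider is 'openai' and an API key is present.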
    initializeLLM() {
        const { provider, apiKey } = this.config.getLLMConfig();
        if (provider === 'openai' && apiKey) {
            this.openai = new openai_1.default({ apiKey });
        }
    }
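    // Sends README/documentation content (plus optional extra context) to the chat model,
    // parses the JSON reply, and returns validated agent metadata. Throws if no provider
    // is configured or the reply is not valid JSON.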
    async extractMetadataFromReadme(content, additionalContext) {
        if (!this.openai) {
            throw new Error('LLM provider not configured. Please set OPENAI_API_KEY in your environment.');
        }
        const prompt = `Analyze the following README/documentation content and extract metadata for creating an AI agent in MeshCore.
Content:
${content}
${additionalContext ? `Additional Context:\n${additionalContext}\n` : ''}
Extract and return the following information in JSON format:
1. name: A concise, descriptive name for the agent (max 50 chars)
2. description: A comprehensive description of what the agent does (100-500 chars)
3. category: Choose the most appropriate category from common categories like:
"development", "data-analysis", "automation", "customer-service", "content-generation",
"integration", "monitoring", "security", "testing", "documentation", "utilities"
4. agentType: Determine if this should be:
- "AGENT" (autonomous, complex tasks)
- "TOOL" (specific utility/function)
- "LLM" (language model based)
5. pricingType: Suggest either "FREE" or "PER_TOKEN" based on complexity
6. capabilities: List 3-5 key capabilities as an array of strings
7. If pricingType is PER_TOKEN, suggest reasonable prices:
- suggestedPrice.inputTokenPrice (in cents per 1000 tokens)
- suggestedPrice.outputTokenPrice (in cents per 1000 tokens)
Return ONLY valid JSON without any markdown formatting or extra text.`;
        try {
            const response = await this.openai.chat.completions.create({
                model: 'gpt-4-turbo-preview',
                messages: [
                    {
                        role: 'system',
                        content: 'You are an expert at analyzing technical documentation and extracting structured metadata for AI agents.'
                    },
                    {
                        role: 'user',
                        content: prompt
                    }
                ],
                temperature: 0.3,
                max_tokens: 1000
            });
            const content = response.choices[0]?.message?.content;
            if (!content) {
                throw new Error('No response from LLM');
            }
            const metadata = JSON.parse(content);
            return this.validateAndNormalizeMetadata(metadata);
        }
        catch (error) {
            if (error instanceof SyntaxError) {
                throw new Error('Failed to parse LLM response as JSON');
            }
            throw error;
        }
    }
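    // Coerces unknown agentType/pricingType values to defaults and trims over-length
    // name and description fields.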
    validateAndNormalizeMetadata(metadata) {
        const validAgentTypes = Object.values(agent_1.AgentType);
        const validPricingTypes = Object.values(agent_1.PricingType);
        if (!validAgentTypes.includes(metadata.agentType)) {
            metadata.agentType = agent_1.AgentType.AGENT;
        }
        if (!validPricingTypes.includes(metadata.pricingType)) {
            metadata.pricingType = agent_1.PricingType.FREE;
        }
        if (metadata.name.length > 50) {
            metadata.name = metadata.name.substring(0, 50);
        }
        if (metadata.description.length > 500) {
            metadata.description = metadata.description.substring(0, 497) + '...';
        }
        return metadata;
    }
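    // Generates a short agent description from basic project information (name, type,
    // dependencies, main files), falling back to a generic description if the model
    // returns no content.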
    async generateAgentDescription(projectInfo) {
        if (!this.openai) {
            throw new Error('LLM provider not configured');
        }
        const prompt = `Generate a concise, professional description for an AI agent based on the following project information:
Project Name: ${projectInfo.name || 'Unknown'}
Project Type: ${projectInfo.type || 'Unknown'}
Key Dependencies: ${projectInfo.dependencies?.join(', ') || 'None specified'}
Main Files: ${projectInfo.files?.slice(0, 10).join(', ') || 'Not specified'}
Create a description that:
1. Clearly states what the agent does
2. Mentions key capabilities
3. Is between 100-300 characters
4. Uses professional, clear language
Return only the description text, no quotes or formatting.`;
        const response = await this.openai.chat.completions.create({
            model: 'gpt-3.5-turbo',
            messages: [
                {
                    role: 'user',
                    content: prompt
                }
            ],
            temperature: 0.5,
            max_tokens: 150
        });
        return response.choices[0]?.message?.content || 'AI agent for automated tasks';
    }
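    // True once an OpenAI client has been initialized.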
    isConfigured() {
        return !!this.openai;
    }
}
exports.LLMService = LLMService;
//# sourceMappingURL=llm.js.map