@guyycodes/plugin-sdk
Version:
AI-powered plugin scaffolding tool - Create full-stack applications with 7+ AI models, 50+ business integrations, and production-ready infrastructure
1,581 lines (1,332 loc) • 77.3 kB
JavaScript
const fs = require('fs-extra');
const path = require('path');
const chalk = require('chalk');
// Import model creation functions
const { createModelFiles } = require('./models');
// Import config creation functions
const { createEnvConfigFiles } = require('./config');
// Import chat creation functions
const { createChatFiles } = require('./chat');
// Import agent creation functions
const { createAgentFiles } = require('./agent');
// Import integrations creation functions
const { createIntegrationsFiles } = require('./integrations');
// Import tools creation functions
const { createToolsFiles } = require('./tools');
async function createBackendFiles(projectPath, backendType, integration = 'quickbooks', projectName) {
console.log(chalk.blue(`🔧 Creating ${backendType} backend files...`));
const serverPath = path.join(projectPath, 'src/server');
if (backendType === 'nodejs') {
await createNodejsBackend(serverPath, integration, projectName);
} else {
await createPythonBackend(serverPath, integration, projectName);
}
}
async function createNodejsBackend(serverPath, integration = 'quickbooks', projectName) {
// Create package.json for server
const serverPackageJson = {
"name": "plugin-server",
"version": "1.0.0",
"type": "module",
"description": "Plugin backend server",
"main": "dist/index.js",
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"quickstart": "node scripts/quickstart.js",
"clean": "rm -rf dist",
"test": "node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=\\.test\\.ts$ --testPathIgnorePatterns=\\.int\\.test\\.ts$",
"test:int": "node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=\\.int\\.test\\.ts$",
"format": "prettier --write .",
"lint": "eslint src",
"format:check": "prettier --check .",
"lint:langgraph-json": "node scripts/checkLanggraphPaths.js",
"lint:all": "yarn lint & yarn lint:langgraph-json & yarn format:check",
"test:all": "yarn test && yarn test:int && yarn lint:langgraph"
},
"dependencies": {
"axios": "^1.6.0",
"@langchain/community": "^0.3.14",
"@langchain/core": "^0.3.57",
"@langchain/langgraph": "^0.3.0",
"@langchain/openai": "^0.3.11",
"@langchain/tavily": "^0.1.3",
"cors": "^2.8.5",
"dotenv": "^16.4.5",
"express": "^4.21.1",
"@tavily/core": "^0.5.8",
"zod": "^3.23.8"
},
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.9.1",
"@tsconfig/recommended": "^1.0.7",
"@types/cors": "^2.8.17",
"@types/express": "^5.0.0",
"@types/jest": "^29.5.0",
"@types/node": "^22.10.2",
"@typescript-eslint/eslint-plugin": "^5.59.8",
"@typescript-eslint/parser": "^5.59.8",
"dotenv": "^16.4.5",
"eslint": "^8.41.0",
"eslint-config-prettier": "^8.8.0",
"eslint-plugin-import": "^2.27.5",
"eslint-plugin-no-instanceof": "^1.0.1",
"eslint-plugin-prettier": "^4.2.1",
"jest": "^29.7.0",
"prettier": "^3.3.3",
"ts-jest": "^29.1.0",
"tsx": "^4.19.2",
"typescript": "^5.3.3"
}
};
fs.writeJsonSync(path.join(serverPath, 'package.json'), serverPackageJson, { spaces: 2 });
// Create TypeScript config
const tsConfig = {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"allowJs": true,
"allowSyntheticDefaultImports": true,
"allowImportingTsExtensions": false,
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"types": ["node", "jest"],
"typeRoots": ["./node_modules/@types", "./types"],
"noEmit": false
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "tests", "**/*.test.ts"],
"ts-node": {
"esm": true,
"experimentalSpecifierResolution": "node"
}
};
fs.writeJsonSync(path.join(serverPath, 'tsconfig.json'), tsConfig, { spaces: 2 });
// Create source directory
fs.ensureDirSync(path.join(serverPath, 'src'));
// Create main server file
const indexTs = `
import cors from 'cors';
import dotenv from 'dotenv';
import { oauthStart } from './oauth-start.js';
import { oauthCallback } from './callback.js';
import { listTools } from "./tools/tools.js";
import { handleChat, handleChatStream } from "./chat/chat.js";
import express, { Request, Response, RequestHandler } from "express";
import { getEnvironmentInfo, validateEnvironment } from "./config/env.js";
import { requireApiKey, optionalApiKey } from "./config/auth.js";
dotenv.config();
// Validate environment variables using env.ts
validateEnvironment();
const PORT = parseInt(process.env.PORT || '3000', 10);
// Environment variables - now guaranteed to be available after validation
const NODE_ENV = process.env.NODE_ENV!;
const LANGCHAIN_TRACING_V2 = process.env.LANGCHAIN_TRACING_V2!;
// Health check handler
const healthCheck: RequestHandler = (_req, res) => {
res.json({ status: "ok", service: "${projectName}-backend" });
};
// Environment info handler
const getEnvInfo: RequestHandler = (_req, res) => {
res.json(getEnvironmentInfo());
};
export default async function startServer() {
const app = express();
// Middleware
app.use(cors());
app.use(express.json());
// Plugin routes
app.get('/api/oauth/start', oauthStart);
app.get('/api/oauth/callback', oauthCallback);
app.post('/api/oauth/disconnect', (req, res) => {
// TODO: Implement token revocation/cleanup logic
// Clear any stored tokens for this user/session
console.log('OAuth disconnect requested');
res.json({ success: true, message: 'Disconnected successfully' });
});
// Routes
app.get("/health", healthCheck);
// API key validation endpoint
app.post('/api/validate-key', (req, res) => {
const { apiKey } = req.body;
if (!apiKey) {
return res.status(400).json({
success: false,
message: 'API key is required'
});
}
// Import validate function inline to avoid circular deps
const { validateApiKey } = require('./config/auth.js');
if (validateApiKey(apiKey)) {
res.json({
success: true,
message: 'Valid API key'
});
} else {
res.status(401).json({
success: false,
message: 'Invalid API key'
});
}
});
// Basic data endpoint - protected with API key
app.get('/api/data', requireApiKey, (req, res) => {
res.json({
status: 'ok',
connected: true,
message: 'Authenticated successfully',
timestamp: new Date().toISOString()
});
});
app.get("/env", getEnvInfo);
// Protected chat endpoints - require API key
app.post("/api/chat", requireApiKey, handleChat);
app.post("/api/chat/stream", requireApiKey, handleChatStream);
app.get("/tools", listTools);
// Start the server
app.listen(PORT, '0.0.0.0', () => {
console.log(\`🧠 The TS server is API only, for local models and full ML/LLM, try the python backend\`);
console.log(\` 🔑 temp API keys are located in /server/confic/auth \`);
console.log(\`Chatbot server running on port \${PORT}\`);
console.log(\`Health check: http://localhost:\${PORT}/health\`);
console.log(\`Environment info: http://localhost:\${PORT}/env\`);
console.log(\`Chat endpoint: http://localhost:\${PORT}/api/chat\`);
console.log(\`Stream endpoint: http://localhost:\${PORT}/api/chat/stream\`);
console.log(\`\nLangSmith tracing enabled: \${LANGCHAIN_TRACING_V2 === "true"}\`);
console.log(\`🚀 Plugin server running on port \${PORT}\`);
console.log(\`🔗 If server fails to connect: check browser dev tools console for existing chat_session_id and/or api-key from local storage, remove them & start fresh\`);
});
}
// Start the server when this file is run directly
startServer().catch(console.error);
`;
fs.writeFileSync(path.join(serverPath, 'src/index.ts'), indexTs);
// Create oauth-start.ts
const oauthStartTs = integration === 'custom'
? `import { Request, Response } from 'express';
export const oauthStart = (req: Request, res: Response) => {
// Generic OAuth start implementation
// TODO: Configure these values for your specific OAuth provider
const authUrl = process.env.OAUTH_AUTH_URL || 'https://your-provider.com/oauth/authorize';
const clientId = process.env.OAUTH_CLIENT_ID;
const redirectUri = process.env.OAUTH_REDIRECT_URI || 'http://localhost:3000/api/oauth/callback';
const scope = process.env.OAUTH_SCOPE || 'read';
if (!clientId) {
res.status(500).json({ error: 'OAuth client ID not configured' });
return;
}
if (!authUrl || authUrl.includes('your-provider.com')) {
res.status(500).json({ error: 'OAuth auth URL not configured. Please set OAUTH_AUTH_URL in your .env file.' });
return;
}
const authorizationUrl = \`\${authUrl}?client_id=\${clientId}&redirect_uri=\${redirectUri}&response_type=code&scope=\${scope}\`;
res.json({ authUrl: authorizationUrl });
};
`
: `import { Request, Response } from 'express';
export const oauthStart = (req: Request, res: Response) => {
// ${integration} OAuth start implementation
const authUrl = process.env.OAUTH_AUTH_URL || 'https://your-provider.com/oauth/authorize';
const clientId = process.env.OAUTH_CLIENT_ID;
const redirectUri = process.env.OAUTH_REDIRECT_URI || 'http://localhost:3000/api/oauth/callback';
if (!clientId) {
return res.status(500).json({ error: 'OAuth client ID not configured' });
}
const authorizationUrl = \`\${authUrl}?client_id=\${clientId}&redirect_uri=\${redirectUri}&response_type=code&scope=read_write\`;
res.json({ authUrl: authorizationUrl });
};
`;
fs.writeFileSync(path.join(serverPath, 'src/oauth-start.ts'), oauthStartTs);
// Create callback.ts
const callbackTs = integration === 'custom'
? `import { Request, Response } from 'express';
import axios from 'axios';
export const oauthCallback = async (req: Request, res: Response) => {
try {
const { code } = req.query;
if (!code) {
res.status(400).json({ error: 'Authorization code not provided' });
return;
}
// Generic OAuth token exchange
// TODO: Customize this for your specific OAuth provider
const tokenUrl = process.env.OAUTH_TOKEN_URL || 'https://your-provider.com/oauth/token';
const clientId = process.env.OAUTH_CLIENT_ID;
const clientSecret = process.env.OAUTH_CLIENT_SECRET;
const redirectUri = process.env.OAUTH_REDIRECT_URI || 'http://localhost:3000/api/oauth/callback';
if (!tokenUrl || tokenUrl.includes('your-provider.com')) {
res.status(500).json({ error: 'OAuth token URL not configured. Please set OAUTH_TOKEN_URL in your .env file.' });
return;
}
const tokenResponse = await axios.post(tokenUrl, {
grant_type: 'authorization_code',
code,
client_id: clientId,
client_secret: clientSecret,
redirect_uri: redirectUri
});
const { access_token, refresh_token } = tokenResponse.data;
// TODO: Store tokens securely (implement your storage logic)
// Consider using a secure database or encrypted storage
console.log('OAuth tokens received - implement secure storage!');
// TODO: Redirect to success page or return tokens as needed
res.json({
success: true,
message: 'OAuth callback successful',
// Don't return tokens in production - store them securely instead
debug: process.env.NODE_ENV === 'development' ? { hasToken: !!access_token } : undefined
});
} catch (error) {
console.error('OAuth callback error:', error);
res.status(500).json({
error: 'OAuth callback failed',
details: process.env.NODE_ENV === 'development' ?
(error instanceof Error ? error.message : String(error)) : undefined
});
}
};
`
: `import { Request, Response } from 'express';
import axios from 'axios';
export const oauthCallback = async (req: Request, res: Response) => {
try {
const { code } = req.query;
if (!code) {
res.status(400).json({ error: 'Authorization code not provided' });
return;
}
// ${integration} OAuth token exchange
const tokenUrl = process.env.OAUTH_TOKEN_URL || 'https://your-provider.com/oauth/token';
const clientId = process.env.OAUTH_CLIENT_ID;
const clientSecret = process.env.OAUTH_CLIENT_SECRET;
const redirectUri = process.env.OAUTH_REDIRECT_URI || 'http://localhost:3000/api/oauth/callback';
const tokenResponse = await axios.post(tokenUrl, {
grant_type: 'authorization_code',
code,
client_id: clientId,
client_secret: clientSecret,
redirect_uri: redirectUri
});
const { access_token } = tokenResponse.data;
// Store token securely (implement your storage logic)
console.log('Access token received:', access_token);
res.json({ success: true, message: 'OAuth callback successful' });
} catch (error) {
console.error('OAuth callback error:', error);
res.status(500).json({ error: 'OAuth callback failed' });
}
};
`;
fs.writeFileSync(path.join(serverPath, 'src/callback.ts'), callbackTs);
// Create additional directories
fs.ensureDirSync(path.join(serverPath, 'src/config'));
fs.ensureDirSync(path.join(serverPath, 'src/agent'));
fs.ensureDirSync(path.join(serverPath, 'src/chat'));
fs.ensureDirSync(path.join(serverPath, 'src/config'));
fs.ensureDirSync(path.join(serverPath, 'src/tools'));
const configAuthTs = `
import { Request, Response, NextFunction } from 'express';
// Development API keys - in production, these should be stored securely
const VALID_API_KEYS = [
"pk_dev_7f8a9b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a",
"pk_test_3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b1c2d3e4f",
"pk_demo_9a0b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b"
];
/**
* Validate if the provided API key is valid
*/
export function validateApiKey(apiKey: string | undefined): boolean {
if (!apiKey) {
return false;
}
return VALID_API_KEYS.includes(apiKey);
}
/**
* Extract API key from request headers
*/
export function getApiKeyFromHeaders(headers: any): string | undefined {
return headers['x-api-key'] || headers['X-API-Key'] || headers['authorization']?.replace('Bearer ', '');
}
/**
* Middleware to check API key authentication
*/
export function requireApiKey(req: Request, res: Response, next: NextFunction) {
const apiKey = getApiKeyFromHeaders(req.headers);
if (!validateApiKey(apiKey)) {
return res.status(401).json({
error: 'Unauthorized',
message: 'Invalid or missing API key'
});
}
// Add API key to request for potential use downstream
(req as any).apiKey = apiKey;
next();
}
/**
* Optional middleware - allows requests with or without API key
* Useful for endpoints that provide different functionality based on auth
*/
export function optionalApiKey(req: Request, res: Response, next: NextFunction) {
const apiKey = getApiKeyFromHeaders(req.headers);
if (apiKey && validateApiKey(apiKey)) {
(req as any).apiKey = apiKey;
(req as any).isAuthenticated = true;
} else {
(req as any).isAuthenticated = false;
}
next();
}
`;
fs.writeFileSync(path.join(serverPath, 'src/config/auth.ts'), configAuthTs);
// Create agent files
const agentGraphTs = `/**
* LangGraph Chatbot with Web Search Capabilities
* This chatbot can answer questions, search the web, and maintain conversation context.
*/
import { StateGraph } from "@langchain/langgraph";
import { RunnableConfig } from "@langchain/core/runnables";
import { ChatOpenAI } from "@langchain/openai";
import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
import { StateAnnotation } from "./state.js";
import { TavilySearch } from "@langchain/tavily";
import { StructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import { validateEnvironment } from "../config/env.js";
// Validate environment variables using env.ts
validateEnvironment();
// Configuration constants
const TOP_RESULTS = 3; // Number of top search results to return
// Environment variables - now guaranteed to be available after validation
const OPENAI_API_KEY = process.env.OPENAI_API_KEY!;
const TAVILY_API_KEY = process.env.TAVILY_API_KEY!;
// Initialize the LLM
const llm = new ChatOpenAI({
model: "gpt-4o-mini",
temperature: 0,
apiKey: OPENAI_API_KEY,
});
// Define a custom search tool that the LLM can use
class WebSearchTool extends StructuredTool {
name = "web_search";
description = "Search the web for current information about any topic. Use this when you need up-to-date information or facts.";
schema = z.object({
query: z.string().describe("The search query"),
});
protected async _call({ query }: { query: string }): Promise<string> {
try {
// Import the Tavily client directly to avoid type issues
const { tavily } = await import('@tavily/core');
// Create a new Tavily client instance
const client = tavily({
apiKey: TAVILY_API_KEY,
});
// Perform the search - Tavily client takes query as single parameter
const searchResponse = await client.search(query);
console.log("Tavily results:", searchResponse);
// Format the results according to the schema
if (searchResponse && searchResponse.results && searchResponse.results.length > 0) {
// Limit to TOP_RESULTS
const topResults = searchResponse.results.slice(0, TOP_RESULTS);
const formattedResults = topResults.map((result: any) => ({
title: result.title || '',
url: result.url || '',
contentSummary: result.content || result.snippet || '',
images: searchResponse.images || [],
sources: [result.url || ''],
files: []
}));
// Return as JSON string for the LLM to process
return JSON.stringify({
query: query,
results: formattedResults,
resultCount: formattedResults.length
}, null, 2);
} else {
return JSON.stringify({
query: query,
results: [],
resultCount: 0,
message: "No results found for your search query."
});
}
} catch (error) {
console.error("Error in web search tool:", error);
return JSON.stringify({
query: query,
error: \`Error performing search: \${error}\`,
results: []
});
}
}
}
// Bind tools to the LLM
const tools = [new WebSearchTool()];
const llmWithTools = llm.bindTools(tools);
/**
* Agent node that decides whether to use tools or respond directly
*/
const callAgent = async (
state: typeof StateAnnotation.State,
_config: RunnableConfig,
): Promise<typeof StateAnnotation.Update> => {
const messages = state.messages;
// Check if we have tool messages (indicating we've done a search)
const hasToolMessages = messages.some(msg => msg._getType() === "tool");
const lastToolMessage = messages.filter(msg => msg._getType() === "tool").pop();
// System prompt for the agent
const currentDate = new Date().toISOString().split('T')[0];
let systemMessage = new SystemMessage(
\`You are a helpful AI assistant with access to web search capabilities.
Current date: \${currentDate}
Important instructions:
- When searching for information, use the web_search tool
- After receiving search results, ALWAYS provide a complete answer based on those results
- Do NOT make multiple searches for the same topic
- Format your response clearly and cite sources from the search results
- If search results are returned, you MUST provide a final answer, not make another tool call
- When you receive tool results in JSON format, parse and present them in a user-friendly way\`
);
// Get current summarySchema to update with token counts
let updatedSummarySchema = { ...state.summarySchema };
// If we've already done a search, force a response
if (hasToolMessages && lastToolMessage) {
systemMessage = new SystemMessage(
\`You are a helpful AI assistant. You have just received search results.
CRITICAL: You MUST now provide a final answer based on the search results you received.
DO NOT make any more tool calls.
Parse the JSON search results and provide a clear, informative response to the user's question.
Include relevant information from the search results and cite your sources.\`
);
// Use a non-tool version of the LLM to force a response without tools
const response = await llm.invoke([systemMessage, ...messages]);
// Extract token usage from the response metadata
const responseWithMetadata = response as any;
if (responseWithMetadata.response_metadata?.tokenUsage) {
updatedSummarySchema.tokens_input = responseWithMetadata.response_metadata.tokenUsage.promptTokens || 0;
updatedSummarySchema.tokens_output = responseWithMetadata.response_metadata.tokenUsage.completionTokens || 0;
} else if (responseWithMetadata.usage_metadata) {
// Alternative structure for token usage
updatedSummarySchema.tokens_input = responseWithMetadata.usage_metadata.input_tokens || 0;
updatedSummarySchema.tokens_output = responseWithMetadata.usage_metadata.output_tokens || 0;
}
return {
messages: [response],
summarySchema: updatedSummarySchema
};
}
// Otherwise, allow tool usage
const response = await llmWithTools.invoke([systemMessage, ...messages]);
// Extract token usage from the response metadata
const responseWithMetadata = response as any;
if (responseWithMetadata.response_metadata?.tokenUsage) {
updatedSummarySchema.tokens_input = responseWithMetadata.response_metadata.tokenUsage.promptTokens || 0;
updatedSummarySchema.tokens_output = responseWithMetadata.response_metadata.tokenUsage.completionTokens || 0;
} else if (responseWithMetadata.usage_metadata) {
// Alternative structure for token usage
updatedSummarySchema.tokens_input = responseWithMetadata.usage_metadata.input_tokens || 0;
updatedSummarySchema.tokens_output = responseWithMetadata.usage_metadata.output_tokens || 0;
}
return {
messages: [response],
summarySchema: updatedSummarySchema
};
};
/**
* Tool execution node
*/
const executeTools = async (
state: typeof StateAnnotation.State,
_config: RunnableConfig,
): Promise<typeof StateAnnotation.Update> => {
const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
if (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0) {
return { messages: [] };
}
let summarySchema = state.summarySchema;
const toolMessages = await Promise.all(
lastMessage.tool_calls.map(async (toolCall: any) => {
const tool = tools.find((t) => t.name === toolCall.name);
if (!tool) {
return new ToolMessage({
content: \`Error: Tool \${toolCall.name} not found\`,
tool_call_id: toolCall.id!,
});
}
try {
const result = await tool.invoke(toolCall.args);
// If this is a web search tool, parse the results and update summarySchema
if (tool.name === "web_search") {
try {
const parsedResult = JSON.parse(result);
if (parsedResult.results && parsedResult.results.length > 0) {
// Use the first result to populate summarySchema
const firstResult = parsedResult.results[0];
summarySchema = {
title: "add to schema here",
url: "add url to schema here",
contentSummary: "add to schema here",
images: ['some image url here', 'some other image url here'],
sources: ['some source here', 'some other source here'],
files: ['add files here', 'add more files here'],
tokens_output: state.summarySchema.tokens_output || 0,
tokens_input: state.summarySchema.tokens_input || 0,
};
}
} catch (parseError) {
console.error("Error parsing search results for summarySchema:", parseError);
}
}
return new ToolMessage({
content: result,
tool_call_id: toolCall.id!,
});
} catch (error) {
return new ToolMessage({
content: \`Error executing tool: \${error}\`,
tool_call_id: toolCall.id!,
});
}
})
);
// Update tool call count - increment by the number of tool calls made
return {
messages: toolMessages,
toolCallCount: lastMessage.tool_calls.length,
context: toolMessages.map(msg => msg.content as string),
summarySchema: summarySchema,
};
};
/**
* Routing function: Determines whether to execute tools or end the conversation
*/
export const shouldContinue = (
state: typeof StateAnnotation.State,
): "tools" | "__end__" => {
const lastMessage = state.messages[state.messages.length - 1];
// If it's not an AI message, end
if (lastMessage._getType() !== "ai") {
return "__end__";
}
const aiMessage = lastMessage as AIMessage;
// If there are tool calls and we haven't exceeded the limit, execute tools
if (aiMessage.tool_calls &&
aiMessage.tool_calls.length > 0 &&
state.toolCallCount < state.maxToolCalls) {
return "tools";
}
// Otherwise, end the conversation turn
return "__end__";
};
// Build the graph
const builder = new StateGraph(StateAnnotation)
// Add nodes
.addNode("agent", callAgent)
.addNode("tools", executeTools)
// Add edges
.addEdge("__start__", "agent")
.addConditionalEdges("agent", shouldContinue)
.addEdge("tools", "agent");
// Compile the graph
export const graph = builder.compile();
// Set the graph name for LangGraph Studio
graph.name = "Chatbot Agent";
// Export a helper function to format messages for the UI
export const formatMessages = (messages: any[]) => {
return messages.map(msg => {
if (msg._getType() === "human") {
return { role: "user", content: msg.content };
} else if (msg._getType() === "ai") {
return { role: "assistant", content: msg.content };
} else if (msg._getType() === "tool") {
return { role: "tool", content: msg.content };
}
return { role: "system", content: msg.content };
});
};
`;
fs.writeFileSync(path.join(serverPath, 'src/agent/graph.ts'), agentGraphTs);
const agentStateTs = `import { BaseMessage, BaseMessageLike } from "@langchain/core/messages";
import { Annotation, messagesStateReducer } from "@langchain/langgraph";
/**
* A graph's StateAnnotation defines three main things:
* 1. The structure of the data to be passed between nodes (which "channels" to read from/write to and their types)
* 2. Default values for each field
* 3. Reducers for the state's. Reducers are functions that determine how to apply updates to the state.
* See [Reducers](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#reducers) for more information.
*/
// Define the summary schema interface for search results
export interface SummarySchema {
title: string;
url: string;
contentSummary: string;
images: string[];
sources: string[];
files: string[];
tokens_output: number;
tokens_input: number;
}
// This is the primary state of your agent, where you can store any information
export const StateAnnotation = Annotation.Root({
/**
* Messages track the primary execution state of the agent.
*
* Typically accumulates a pattern of:
*
* 1. HumanMessage - user input
* 2. AIMessage with .tool_calls - agent picking tool(s) to use to collect
* information
* 3. ToolMessage(s) - the responses (or errors) from the executed tools
*
* (... repeat steps 2 and 3 as needed ...)
* 4. AIMessage without .tool_calls - agent responding in unstructured
* format to the user.
*
* 5. HumanMessage - user responds with the next conversational turn.
*
* (... repeat steps 2-5 as needed ... )
*
* Merges two lists of messages or message-like objects with role and content,
* updating existing messages by ID.
*
* Message-like objects are automatically coerced by \`messagesStateReducer\` into
* LangChain message classes. If a message does not have a given id,
* LangGraph will automatically assign one.
*
* By default, this ensures the state is "append-only", unless the
* new message has the same ID as an existing message.
*
* Returns:
* A new list of messages with the messages from \`right\` merged into \`left\`.
* If a message in \`right\` has the same ID as a message in \`left\`, the
* message from \`right\` will replace the message from \`left\`\`
*/
messages: Annotation<BaseMessage[], BaseMessageLike[]>({
reducer: messagesStateReducer,
default: () => [],
}),
/**
* Context accumulates information from web searches and other tools
*/
context: Annotation<string[]>({
reducer: (current: string[], update: string[]) => current.concat(update),
default: () => [],
}),
/**
* Track the current search query for web searches
*/
searchQuery: Annotation<string>({
reducer: (current: string, update: string) => update || current,
default: () => "",
}),
/**
* Track whether a web search is needed
*/
needsWebSearch: Annotation<boolean>({
reducer: (current: boolean, update: boolean) => update,
default: () => false,
}),
/**
* Maximum number of tool calls to prevent infinite loops
*/
maxToolCalls: Annotation<number>({
reducer: (current: number, update: number) => update || current,
default: () => 5,
}),
/**
* Current count of tool calls
*/
toolCallCount: Annotation<number>({
reducer: (current: number, update: number) => current + update,
default: () => 0,
}),
/**
* Summary schema for search results - passes around the state
*/
summarySchema: Annotation<SummarySchema>({
reducer: (current: SummarySchema, update: SummarySchema) => update || current,
default: () => ({
title: "",
url: "",
contentSummary: "",
images: [],
sources: [],
files: [],
tokens_output: 0,
tokens_input: 0
}),
}),
/**
* Feel free to add additional attributes to your state as needed.
* Common examples include retrieved documents, extracted entities, API connections, etc.
*
* For simple fields whose value should be overwritten by the return value of a node,
* you don't need to define a reducer or default.
*/
// additionalField: Annotation<string>,
});
`;
fs.writeFileSync(path.join(serverPath, 'src/agent/state.ts'), agentStateTs);
// Create chat files
const chatTs = `import { RequestHandler } from "express";
import { graph } from "../agent/graph.jsx";
import { HumanMessage } from "@langchain/core/messages";
// Chat handler
export const handleChat: RequestHandler = async (req, res) => {
try {
const { message, sessionId } = req.body;
if (!message) {
res.status(400).json({ error: "Message is required" });
return;
}
console.log(\`Processing message for session \${sessionId || "default"}: \${message}\`);
// Create the initial state with the user's message
const initialState = {
messages: [new HumanMessage(message)],
context: [],
searchQuery: "",
needsWebSearch: false,
maxToolCalls: 2,
toolCallCount: 0,
};
// Run the graph
const result = await graph.invoke(initialState, {
configurable: {
thread_id: sessionId || "default",
},
});
// Debug: Log the messages to understand the flow
console.log("Message flow:");
result.messages.forEach((msg: any, idx: number) => {
console.log(\`\${idx}: \${msg._getType()} - \${msg.content?.substring(0, 100) || '(no content)'}\${msg.tool_calls ? ' [has tool calls]' : ''}\`);
});
// Extract the final message from the result
const messages = result.messages;
// Find the last AI message with actual content and no pending tool calls
let finalMessage = messages[messages.length - 1];
for (let i = messages.length - 1; i >= 0; i--) {
const msg = messages[i];
if (msg.getType() === "ai" && msg.content) {
// Check if it's an AI message with tool calls
const aiMsg = msg as any; // Type assertion to access tool_calls
if (!aiMsg.tool_calls || aiMsg.tool_calls.length === 0) {
finalMessage = msg;
break;
}
}
}
// If we couldn't find a complete AI response, there might be an issue
const finalAiMsg = finalMessage as any;
if (!finalMessage.content || (finalMessage.getType() === "ai" && finalAiMsg.tool_calls?.length)) {
console.error("Warning: Could not find a complete AI response. Last message has tool calls or no content.");
}
// Format the response
const response = {
message: finalMessage.content || "I couldn't generate a response. Please try again.",
messageType: finalMessage.getType(),
sessionId: sessionId || "default",
metadata: {
toolsUsed: result.toolCallCount > 0,
searchPerformed: result.context.length > 0,
},
summarySchema: result.summarySchema,
// chainOfThought: formatChainOfThought(messages),
};
res.json(response);
} catch (error) {
console.error("Error processing chat:", error);
res.status(500).json({
error: "An error occurred while processing your message",
details: error instanceof Error ? error.message : "Unknown error",
});
}
};
// Stream chat handler
export const handleChatStream: RequestHandler = async (req, res) => {
try {
const { message, sessionId } = req.body;
if (!message) {
res.status(400).json({ error: "Message is required" });
return;
}
// Set up SSE headers
res.setHeader("Content-Type", "text/event-stream");
res.setHeader("Cache-Control", "no-cache");
res.setHeader("Connection", "keep-alive");
const initialState = {
messages: [new HumanMessage(message)],
context: [],
searchQuery: "",
needsWebSearch: false,
maxToolCalls: 5,
toolCallCount: 0,
};
// Stream the graph execution
const stream = await graph.stream(initialState, {
configurable: {
thread_id: sessionId || "default",
},
});
for await (const chunk of stream) {
// Send each chunk as a server-sent event
res.write(\`data: \${JSON.stringify(chunk)}\`);
}
res.write("data: [DONE]");
res.end();
} catch (error) {
console.error("Error in stream:", error);
res.write(\`data: \${JSON.stringify({ error: "Stream error" })}\`);
res.end();
}
};
// // Helper function to format chain of thought
// const formatChainOfThought = (messages: any[]) => {
// const chainOfThought: any[] = [];
// messages.forEach((msg, idx) => {
// const type = msg._getType();
// // Skip the initial human message (already included in the request)
// if (idx === 0 && type === "human") {
// return;
// }
// let step: any = {
// type: type,
// timestamp: new Date().toISOString(),
// };
// if (type === "ai") {
// const aiMsg = msg as any;
// if (aiMsg.tool_calls && aiMsg.tool_calls.length > 0) {
// // AI message with tool calls
// step.action = "tool_decision";
// step.content = msg.content || "Deciding to use tools...";
// step.tools = aiMsg.tool_calls.map((call: any) => ({
// name: call.name,
// args: call.args
// }));
// } else {
// // AI message with final response
// step.action = "response";
// step.content = msg.content;
// }
// } else if (type === "tool") {
// // Tool execution result
// step.action = "tool_result";
// try {
// // Try to parse tool result if it's JSON
// const parsed = JSON.parse(msg.content);
// step.content = parsed;
// } catch {
// // If not JSON, keep as string
// step.content = msg.content;
// }
// } else {
// // Other message types
// step.content = msg.content;
// }
// chainOfThought.push(step);
// });
// return chainOfThought;
// };
`;
fs.writeFileSync(path.join(serverPath, 'src/chat/chat.ts'), chatTs);
// Create config files
const envTs = `/**
* Environment Configuration for LangGraph Chatbot
* Ensures all required environment variables are loaded and available
*/
import { config } from "dotenv";
import * as readline from "readline";
// Load environment variables from .env file
config();
/**
* Prompts for an environment variable if not found
*/
async function ensureEnvVar(varName: string): Promise<void> {
if (!process.env[varName]) {
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
});
try {
const value = await new Promise<string>((resolve) => {
rl.question(\`\${varName}: \`, (answer) => {
resolve(answer);
});
});
if (value.trim()) {
process.env[varName] = value.trim();
} else {
console.error(\`❌ \${varName} is required but was not provided\`);
process.exit(1);
}
} finally {
rl.close();
}
}
}
/**
* Initialize and validate all required environment variables
*/
export async function initializeEnvironment(): Promise<void> {
console.log("🔧 Initializing environment configuration...");
// Required environment variables
const requiredVars = [
"OPENAI_API_KEY",
"TAVILY_API_KEY",
"LANGCHAIN_API_KEY"
];
// Ensure all required variables are present
for (const varName of requiredVars) {
await ensureEnvVar(varName);
}
// Set up additional environment variables with defaults
process.env.LANGCHAIN_TRACING_V2 = process.env.LANGCHAIN_TRACING_V2 || "true";
process.env.LANGCHAIN_PROJECT = "${projectName}";
process.env.LANGCHAIN_ENDPOINT = process.env.LANGCHAIN_ENDPOINT || "https://api.smith.langchain.com";
process.env.NODE_ENV = process.env.NODE_ENV || "development";
process.env.PORT = process.env.PORT || "3000";
// Optional variables with defaults
if (!process.env.REDIS_URI) {
process.env.REDIS_URI = "redis://localhost:6379";
}
if (!process.env.POSTGRES_URI) {
process.env.POSTGRES_URI = "postgres://postgres:postgres@localhost:5432/langgraph?sslmode=disable";
}
console.log(\`✅ Environment configuration complete\`);
console.log(\`📊 LangSmith tracing: \${process.env.LANGCHAIN_TRACING_V2}\`);
console.log(\`📁 LangSmith project: \${process.env.LANGCHAIN_PROJECT}\`);
console.log(\`🌐 Environment: \${process.env.NODE_ENV}\`);
console.log(\`🚀 Port: \${process.env.PORT}\`);
}
/**
* Validate that all required environment variables are set
* (for production environments where prompting is not possible)
*/
export function validateEnvironment(): void {
const requiredVars = [
"OPENAI_API_KEY",
"TAVILY_API_KEY",
"LANGCHAIN_API_KEY"
];
const missingVars = requiredVars.filter(varName => !process.env[varName]);
if (missingVars.length > 0) {
console.error(\`❌ Missing required environment variables:\`);
missingVars.forEach(varName => {
console.error(\` - \${varName}\`);
});
console.error(\`Please set these environment variables and try again.\\n
OpenAi: https://platform.openai.com/docs/api-reference/introduction,\\n
Tavily: https://tavily.ai/api,\\n
LangChain: https://smith.langchain.com/ to get the API keys.\`);
process.exit(1);
}
// Set defaults for optional variables
process.env.LANGCHAIN_TRACING_V2 = process.env.LANGCHAIN_TRACING_V2 || "true";
process.env.LANGCHAIN_PROJECT = process.env.LANGCHAIN_PROJECT || "chatbot-agent";
process.env.LANGCHAIN_ENDPOINT = process.env.LANGCHAIN_ENDPOINT || "https://api.smith.langchain.com";
}
/**
* Get environment configuration for display
*/
export function getEnvironmentInfo(): Record<string, string | undefined> {
return {
NODE_ENV: process.env.NODE_ENV,
PORT: process.env.PORT,
LANGCHAIN_TRACING_V2: process.env.LANGCHAIN_TRACING_V2,
LANGCHAIN_PROJECT: process.env.LANGCHAIN_PROJECT,
LANGCHAIN_ENDPOINT: process.env.LANGCHAIN_ENDPOINT,
REDIS_URI: process.env.REDIS_URI,
POSTGRES_URI: process.env.POSTGRES_URI,
// Don't expose API keys for security
OPENAI_API_KEY: process.env.OPENAI_API_KEY ? "✅ Set" : "❌ Missing",
TAVILY_API_KEY: process.env.TAVILY_API_KEY ? "✅ Set" : "❌ Missing",
LANGCHAIN_API_KEY: process.env.LANGCHAIN_API_KEY ? "✅ Set" : "❌ Missing",
};
}
`;
fs.writeFileSync(path.join(serverPath, 'src/config/env.ts'), envTs);
// Create tools files
const toolsTs = `import { RequestHandler } from "express";
// List tools handler
export const listTools: RequestHandler = (_req, res) => {
res.json({
tools: [
{
name: "web_search",
description: "Search the web for current information about any topic",
},
],
});
};
`;
fs.writeFileSync(path.join(serverPath, 'src/tools/tools.ts'), toolsTs);
// Create Dockerfile
const dockerfile = `# Use official Node.js image as base
FROM node:20-slim AS base
# Install dependencies for better container handling
RUN apt-get update -qq \
&& apt-get install -y --no-install-recommends \
ca-certificates \
curl \
dumb-init \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Stage 1: Dependencies
FROM base AS deps
WORKDIR /app
# Copy package files
COPY package.json yarn.lock ./
# Install production dependencies
RUN yarn install --frozen-lockfile --production
# Stage 2: Build
FROM base AS build
WORKDIR /app
# Copy package files
COPY package.json yarn.lock ./
# Install all dependencies (including dev)
RUN yarn install --frozen-lockfile
# Copy source code
COPY . .
# Build the TypeScript code
RUN yarn build
# Stage 3: Production
FROM base AS runtime
# Install LangGraph CLI globally
RUN npm install -g @langchain/cli
WORKDIR /app
# Copy production dependencies
COPY --from=deps /app/node_modules ./node_modules
# Copy built application
COPY --from=build /app/dist ./dist
COPY --from=build /app/package.json ./
COPY --from=build /app/langgraph.json ./
# Create non-root user
RUN useradd -m -u 1001 appuser && chown -R appuser:appuser /app
USER appuser
# Set environment variables
ENV NODE_ENV=production
ENV LANGCHAIN_TRACING_V2=true
# Expose port
EXPOSE 3000
# Use dumb-init to handle signals properly
ENTRYPOINT ["dumb-init", "--"]
# Start the application
CMD ["node", "dist/server.js","langgraph","serve","--host","0.0.0.0","--port","3000"]
`;
fs.writeFileSync(path.join(serverPath, 'dockerfile'), dockerfile);
// Create .gitignore
const gitignore = `node_modules
dist
dist-ssr
index.cjs
index.js
index.d.ts
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/sdks
!.yarn/versions
.turbo
**/.turbo
**/.eslintcache
.env
.ipynb_checkpoints
\# LangGraph API
.langgraph_api
`;
fs.writeFileSync(path.join(serverPath, '.gitignore'), gitignore);
// Create .dockerignore
const dockerignore = `node_modules
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
\# Build output
dist/
*.tsbuildinfo
\# Environment files
.env
.env.local
.env.*.local
\# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
\# OS files
.DS_Store
Thumbs.db
\# Git
.git/
.gitignore
\# Documentation
README.md
docs/
\# Testing
coverage/
.nyc_output/
*.test.ts
*.spec.ts
__tests__/
tests/
\# Development files
.eslintrc*
.prettierrc*
jest.config.*
tsconfig.tsbuildinfo
\# CI/CD
.github/
.gitlab-ci.yml
.circleci/
\# Docker files (avoid recursion)
Dockerfile
.dockerignore
docker-compose*.yml
`;
fs.writeFileSync(path.join(serverPath, '.dockerignore'), dockerignore);
// Create .env template
const envTemplate = `# OAuth Configuration
OAUTH_CLIENT_ID=your_client_id_here
OAUTH_CLIENT_SECRET=your_client_secret_here
OAUTH_AUTH_URL=https://your-provider.com/oauth/authorize
OAUTH_TOKEN_URL=https://your-provider.com/oauth/token
OAUTH_REDIRECT_URI=http://localhost:3000/api/oauth/callback
# OpenAI Configuration - REQUIRED
OPENAI_API_KEY=
# Tavily API for Web Search - REQUIRED
TAVILY_API_KEY=
# LangSmith Configuration for Telemetry - OPTIONAL
LANGCHAIN_TRACING_V2=true
LANGCHAIN_PROJECT=${projectName}
LANGCHAIN_API_KEY=
LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
# Server Configuration
NODE_ENV=development
# Server Configuration
PORT=3000
B2_MODEL_BUCKET_ID_API_KEY=
B2_MODEL_BUCKET_API_KEY=
NEXT_AGENT_ADDRESS=
THIS_AGENT_ADDRESS=
`;
fs.writeFileSync(path.join(serverPath, '.env'), envTemplate);
}
async function createPythonBackend(serverPath, integration = 'quickbooks', projectName) {
// Create requirements.txt with additional model dependencies
const requirements = `
fastapi==0.109.2
uvicorn[standard]==0.27.1
python-dotenv==1.0.0
httpx==0.27.2
python-multipart==0.0.19
wheel
# LangGraph and LangChain dependencies
langgraph
langchain-core
langchain==0.3.15
langchain-openai
langchain-community
tavily-python
# Additional dependencies
pydantic===2.7.4
redis==5.0.1
asyncpg==0.29.0
# IMPORTANT: PyTorch must be installed before flash_attn
# Install PyTorch and related dependencies first
torch>=2.0.0
torchvision
accelerate>=0.25.0
# Hugging Face and model dependencies
transformers>=4.36.0
huggingface-hub>=0.19.0
peft
# Flux.1-schnell image generation model dependencies
# FluxKontext requires diffusers from main branch
git+https://github.com/huggingface/diffusers.git
Pillow>=10.0.0
sentencepiece>=0.1.99
protobuf>=3.20.0
safetensors>=0.4.0
numpy
soundfile
backoff
scipy
# Memory optimization and B2 download dependencies
psutil>=5.9.0
bitsandbytes>=0.41.0
# xformers>=0.0.20
b2sdk>=2.0.0
b2>=3.19.1
# deep speeed requires the cud nvcc compiler,
# if you want tensor parralelism replace the docker image with 'FROM nvidia/cuda:12.2.2-devel-ubuntu22.04'
# deepspeed
# Flash Attention pre-built wheel (avoids nvcc compilation)
# https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.3.12/flash_attn-2.8.0+cu128torch2.7-cp311-cp311-linux_x86_64.whl
`;
fs.writeFileSync(path.join(serverPath, 'requirements.txt'), requirements);
// Create model management files
await createModelFiles(serverPath);
// Create agent files
await createAgentFiles(serverPath);
// Create chat files
await createChatFiles(serverPath);
// Create config files
await createEnvConfigFiles(serverPath);
// Create tools files
await createToolsFiles(serverPath);
// Create integrations files
await createIntegrationsFiles(serverPath);
// Create langgraph.json
const langgraphJson = {
"python_version": "3.11",
"graphs": {
"chatbot": "./agent/graph.py:graph"
},
"env": ".env",
"dependencies": [
"."
],
"image_distro": "wolfi",
"dockerfile_lines": []
} ;
fs.writeJsonSync(path.join(serverPath, 'langgraph.json'), langgraphJson, { spaces: 2 });
// Create langgraph.json
const appState =`# app_state.py
# we keep the active model here: can be extended to keep more than a single model active.
import os
import aiohttp
import logging
from typing import Dict, Any, Optional
from fastapi import WebSocket
logger = logging.getLogger(__name__)
class AppState:
def __init__(self):
self.active_model = "gpt-4o-mini"
# WebSocket connections for thought streaming
self.thought_websockets: Dict[str, WebSocket] = {}
def get_active_model(self):
return self.active_model
def set_active_model(self, model_name):
# Clear cache for the old model to ensure fresh reload
from models.model_loader import model_loader
if self.active_model != model_name:
# Clear the old model's cache when switching
model_loader.clear_cache(self.active_model)
# Optionally clear the new model's cache too if you want fresh reload
# model_loader.clear_cache(model_name)
self.active_model = model_name
#########################################################################################