@digilogiclabs/saas-factory-ai
Version:
Next.js 15 Compatible AI Integration Platform - Drop-in ready with server/client separation for seamless React Server Components support.
1,424 lines (1,413 loc) • 44.9 kB
JavaScript
// Bundler runtime helpers (esbuild-style CommonJS interop shims).
const __defProp = Object.defineProperty;
const __getOwnPropDesc = Object.getOwnPropertyDescriptor;
const __getOwnPropNames = Object.getOwnPropertyNames;
const __hasOwnProp = Object.prototype.hasOwnProperty;
// Wrap a lazily-initialized module: the returned __init runs the module body
// exactly once (resetting `fn` to 0 drops the body so later calls are
// no-ops) and always returns the cached `res`.
const __esm = (fn, res) => function __init() {
  if (fn) {
    const moduleBody = fn[__getOwnPropNames(fn)[0]];
    fn = 0;
    res = moduleBody(0);
  }
  return res;
};
// Define live, enumerable getter re-exports on `target` for every entry in `all`.
const __export = (target, all) => {
  for (const name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};
// Copy own properties of `from` onto `to` as live getters, skipping keys that
// already exist on `to` and the optional `except` key. Returns `to`.
const __copyProps = (to, from, except, desc) => {
  if ((from && typeof from === "object") || typeof from === "function") {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) {
        continue;
      }
      const descriptor = __getOwnPropDesc(from, key);
      __defProp(to, key, {
        get: () => from[key],
        enumerable: !descriptor || descriptor.enumerable
      });
    }
  }
  return to;
};
// Build a CommonJS exports object flagged as an ES module, mirroring `mod`.
const __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/shared/types.ts
var AICapability;
var init_types = __esm({
  "src/shared/types.ts"() {
    "use strict";
    // String-enum of gateway capabilities (compiled from the TS enum).
    // TypeScript string enums get no reverse (value -> name) mapping.
    const capabilityValues = {
      TEXT_CHAT: "text.chat",
      TEXT_COMPLETION: "text.completion",
      TEXT_EMBEDDINGS: "text.embeddings",
      IMAGE_GENERATION: "image.gen",
      IMAGE_ANALYSIS: "image.analysis",
      AUDIO_GENERATION: "audio.gen",
      AUDIO_TRANSCRIPTION: "audio.transcription",
      VIDEO_GENERATION: "video.gen",
      VIDEO_ANALYSIS: "video.analysis",
      CODE_GENERATION: "code.generation",
      ANALYSIS: "analysis"
    };
    AICapability = Object.assign(AICapability || {}, capabilityValues);
  }
});
// src/server/aiUtils.ts
var aiUtils_exports = {};
__export(aiUtils_exports, {
  createAIHub: () => createAIHub
});
/**
 * Factory for the server-side AI hub.
 * @param {{gatewayUrl: string, apiKey: string}} config
 * @returns {ServerAIHub}
 */
function createAIHub(config) {
  const hub = new ServerAIHub(config);
  return hub;
}
var ServerAIHub;
var init_aiUtils = __esm({
  "src/server/aiUtils.ts"() {
    "use strict";
    /**
     * Server-side client for the AI gateway. Every method POSTs JSON to
     * `${config.gatewayUrl}/v1/...` with a bearer token from `config.apiKey`,
     * logs failures via console.error, and rethrows them wrapped in a
     * descriptive Error.
     */
    ServerAIHub = class {
      /** @param {{gatewayUrl: string, apiKey: string}} config */
      constructor(config) {
        this.config = config;
      }
      // Common JSON + authorization headers for every gateway request.
      buildHeaders() {
        return {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${this.config.apiKey}`
        };
      }
      /**
       * Send a chat request.
       * @param {Array<{role: string, content: string}>} messages
       * @param {object} [options] - forwarded to the gateway; `stream: true`
       *   switches the return value to an async generator of content chunks.
       * @returns {Promise<string|AsyncGenerator<string>>}
       */
      async chat(messages, options) {
        try {
          const response = await fetch(`${this.config.gatewayUrl}/v1/chat`, {
            method: "POST",
            headers: this.buildHeaders(),
            body: JSON.stringify({ messages, ...options })
          });
          if (!response.ok) {
            throw new Error(`AI request failed: ${response.statusText}`);
          }
          if (options?.stream) {
            return this.handleStreamingResponse(response);
          }
          const result = await response.json();
          return result.content || result.message || "No response received";
        } catch (error) {
          console.error("AI Chat Error:", error);
          throw new Error(`Chat failed: ${error instanceof Error ? error.message : "Unknown error"}`);
        }
      }
      /**
       * Submit an async generation job and normalize the queued-job envelope.
       * Shared by generateVideo/generateAudio, whose bodies were previously
       * identical apart from the endpoint and error label.
       */
      async submitJob(path, payload, label) {
        const response = await fetch(`${this.config.gatewayUrl}${path}`, {
          method: "POST",
          headers: this.buildHeaders(),
          body: JSON.stringify(payload)
        });
        if (!response.ok) {
          throw new Error(`${label} failed: ${response.statusText}`);
        }
        const result = await response.json();
        return {
          jobId: result.jobId,
          status: "queued",
          estimatedTimeMs: result.estimatedTimeMs
        };
      }
      /** Queue a video generation job; resolves with { jobId, status, estimatedTimeMs }. */
      async generateVideo(prompt, options) {
        try {
          return await this.submitJob("/v1/video/generate", { prompt, ...options }, "Video generation");
        } catch (error) {
          console.error("Video Generation Error:", error);
          throw new Error(`Video generation failed: ${error instanceof Error ? error.message : "Unknown error"}`);
        }
      }
      /** Queue an audio generation job; resolves with { jobId, status, estimatedTimeMs }. */
      async generateAudio(prompt, options) {
        try {
          return await this.submitJob("/v1/audio/generate", { prompt, ...options }, "Audio generation");
        } catch (error) {
          console.error("Audio Generation Error:", error);
          throw new Error(`Audio generation failed: ${error instanceof Error ? error.message : "Unknown error"}`);
        }
      }
      /**
       * Run a content analysis and normalize the result shape: numeric fields
       * default to 0 and `details` to an empty object when absent.
       */
      async analyze(content, analysisType) {
        try {
          const response = await fetch(`${this.config.gatewayUrl}/v1/analyze`, {
            method: "POST",
            headers: this.buildHeaders(),
            body: JSON.stringify({ content, type: analysisType })
          });
          if (!response.ok) {
            throw new Error(`Analysis failed: ${response.statusText}`);
          }
          const result = await response.json();
          return {
            type: analysisType,
            score: result.score || 0,
            confidence: result.confidence || 0,
            details: result.details || {},
            metadata: result.metadata
          };
        } catch (error) {
          console.error("Analysis Error:", error);
          throw new Error(`Analysis failed: ${error instanceof Error ? error.message : "Unknown error"}`);
        }
      }
      /**
       * Parse an SSE (`data: ...`) body into an async stream of content
       * strings. Fixes two defects in the previous version: UTF-8 is now
       * decoded in streaming mode so multi-byte characters split across
       * network chunks survive, and partial lines are buffered across reads
       * (a `data:` frame straddling a chunk boundary was previously dropped).
       */
      async *handleStreamingResponse(response) {
        const reader = response.body?.getReader();
        const decoder = new TextDecoder();
        if (!reader)
          throw new Error("No response body for streaming");
        // Returns the content string for a frame, null for the [DONE]
        // sentinel, or undefined for lines to ignore.
        const parseLine = (line) => {
          if (!line.startsWith("data: "))
            return void 0;
          const data = line.slice(6);
          if (data === "[DONE]")
            return null;
          try {
            const parsed = JSON.parse(data);
            if (parsed.content)
              return parsed.content;
          } catch (e) {
            // Ignore malformed or keep-alive frames (matches prior behavior).
          }
          return void 0;
        };
        let buffer = "";
        try {
          while (true) {
            const { done, value } = await reader.read();
            if (done)
              break;
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split("\n");
            buffer = lines.pop() ?? ""; // keep the trailing partial line
            for (const line of lines) {
              const content = parseLine(line);
              if (content === null)
                return;
              if (content !== void 0)
                yield content;
            }
          }
          // Flush a final frame that arrived without a trailing newline.
          buffer += decoder.decode();
          if (buffer) {
            const content = parseLine(buffer);
            if (content !== null && content !== void 0)
              yield content;
          }
        } finally {
          reader.releaseLock();
        }
      }
    };
  }
});
// src/presets/ProjectPresets.ts
/**
 * Look up a project preset by name and overlay caller overrides.
 * `providers` and `features` are merged one level deep; every other config
 * key from `overrides` replaces the preset value wholesale.
 * @returns the merged preset, or null for unknown names.
 */
function getPreset(name, overrides = {}) {
  const preset = PROJECT_PRESETS[name];
  if (!preset) {
    return null;
  }
  const mergedConfig = {
    ...preset.config,
    ...overrides
  };
  mergedConfig.providers = { ...preset.config.providers, ...overrides.providers };
  mergedConfig.features = { ...preset.config.features, ...overrides.features };
  return { ...preset, config: mergedConfig };
}
// All preset bindings below are assigned when init_ProjectPresets() first runs.
var ECOMMERCE_PRESET, CONTENT_CREATOR_PRESET, EDUCATIONAL_PRESET, HEALTHCARE_PRESET, FINANCIAL_PRESET, GAMING_PRESET, CUSTOMER_SERVICE_PRESET, DEVELOPER_PRESET, RESEARCH_PRESET, PROJECT_PRESETS;
var init_ProjectPresets = __esm({
"src/presets/ProjectPresets.ts"() {
"use strict";
// E-commerce: quality-routed chat (Anthropic default, OpenAI for
// embeddings/images), Redis caching and hourly sliding-window rate limits.
ECOMMERCE_PRESET = {
name: "E-Commerce AI Assistant",
description: "Complete AI solution for e-commerce platforms with product recommendations, customer support, and content generation",
config: {
gatewayUrl: "",
apiKey: "",
preset: "custom",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings", "image.gen"],
priority: 1,
maxRetries: 3
},
anthropic: {
capabilities: ["text.chat"],
priority: 2,
maxRetries: 2
}
},
routing: {
strategy: "quality",
fallback: true,
loadBalancing: true,
customRouting: (capability) => {
if (capability === "text.embeddings")
return "openai";
if (capability === "image.gen")
return "openai";
return "anthropic";
}
},
features: {
caching: {
enabled: true,
ttl: 600,
// 10 minutes for product data
storage: "redis"
},
rateLimiting: {
enabled: true,
requests: 1e3,
windowMs: 36e5,
// 1 hour
strategy: "sliding-window"
},
monitoring: {
enabled: true,
metrics: ["latency", "cost", "usage"],
webhooks: []
}
}
},
components: [
"ProductRecommendationChat",
"CustomerSupportBot",
"ProductDescriptionGenerator",
"ReviewAnalyzer",
"InventoryAlerts"
],
setupInstructions: `
1. Configure your product catalog API endpoint
2. Set up customer data integration
3. Train the recommendation engine with your product data
4. Customize the chat personality for your brand
5. Set up webhooks for order processing
`,
examples: [
{
name: "Product Recommendation",
input: "I need a laptop for gaming",
output: "Based on your gaming needs, I recommend..."
},
{
name: "Customer Support",
input: "Where is my order?",
output: "Let me check your order status..."
}
]
};
// Content creation: routes video to Gemini, images to OpenAI, prose to
// Anthropic; no caching block (dynamic creative output).
CONTENT_CREATOR_PRESET = {
name: "Content Creator Platform",
description: "AI-powered content creation suite with video, audio, text generation and social media optimization",
config: {
gatewayUrl: "",
apiKey: "",
preset: "content-generator",
providers: {
openai: {
capabilities: ["text.chat", "image.gen"],
priority: 2
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Better for creative writing
},
gemini: {
capabilities: ["video.gen", "text.chat"],
priority: 1
// Best for video generation
}
},
routing: {
strategy: "quality",
fallback: true,
customRouting: (capability) => {
if (capability === "video.gen")
return "gemini";
if (capability === "image.gen")
return "openai";
return "anthropic";
}
},
features: {
rateLimiting: {
enabled: true,
requests: 500,
windowMs: 36e5
},
monitoring: {
enabled: true,
metrics: ["cost", "usage", "quality"]
}
}
},
components: [
"BlogPostGenerator",
"SocialMediaScheduler",
"VideoScriptWriter",
"ThumbnailGenerator",
"SEOOptimizer",
"ContentAnalyzer"
],
examples: [
{
name: "Blog Post Generation",
input: { topic: "AI in Healthcare", tone: "professional", length: "long" },
output: "Comprehensive blog post with SEO optimization"
}
]
};
// Education: long-lived Redis caching plus audit logging/encryption for
// compliance with educational-data requirements.
EDUCATIONAL_PRESET = {
name: "Educational Platform",
description: "Comprehensive AI tutoring system with personalized learning, assessments, and progress tracking",
config: {
gatewayUrl: "",
apiKey: "",
preset: "educator",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings"],
priority: 1
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Excellent for educational content
}
},
routing: {
strategy: "quality",
fallback: true
},
features: {
caching: {
enabled: true,
ttl: 1800,
// 30 minutes for educational content
storage: "redis"
},
security: {
auditLogging: true,
// Important for educational compliance
encryption: true
},
monitoring: {
enabled: true,
metrics: ["usage", "latency", "errors"]
}
}
},
components: [
"AdaptiveTutor",
"AssessmentGenerator",
"ProgressTracker",
"StudyPlanCreator",
"QuizMaster",
"LearningAnalytics"
],
examples: [
{
name: "Personalized Tutoring",
input: { subject: "Mathematics", level: "high-school", topic: "Calculus" },
output: "Adaptive tutoring session with practice problems"
}
]
};
// Healthcare: full security block (encryption, audit logging, key rotation)
// and low request ceiling; setupInstructions flag HIPAA prerequisites.
HEALTHCARE_PRESET = {
name: "Healthcare AI Assistant",
description: "HIPAA-compliant AI assistant for healthcare providers with symptom analysis and medical research support",
config: {
gatewayUrl: "",
apiKey: "",
preset: "custom",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings"],
priority: 1
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Careful, nuanced responses for healthcare
}
},
routing: {
strategy: "quality",
fallback: true
},
features: {
security: {
encryption: true,
auditLogging: true,
// Required for HIPAA compliance
apiKeyRotation: true
},
rateLimiting: {
enabled: true,
requests: 200,
windowMs: 36e5
},
monitoring: {
enabled: true,
metrics: ["usage", "errors", "latency"],
webhooks: []
// For compliance alerts
}
}
},
components: [
"SymptomChecker",
"MedicalResearchAssistant",
"PatientCommunicationHelper",
"DrugInteractionChecker",
"ClinicalDecisionSupport"
],
setupInstructions: `
\u26A0\uFE0F IMPORTANT: This preset requires HIPAA compliance configuration
1. Ensure all data is encrypted at rest and in transit
2. Set up audit logging for all patient interactions
3. Configure secure API endpoints with proper authentication
4. Implement data retention policies per healthcare regulations
5. Regular security audits and vulnerability assessments required
`
};
// Finance: cost-based routing for high volume, short-TTL Redis caching for
// market data, full security block.
FINANCIAL_PRESET = {
name: "Financial Services AI",
description: "AI-powered financial analysis, risk assessment, and customer service for fintech applications",
config: {
gatewayUrl: "",
apiKey: "",
preset: "analyst",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings"],
priority: 1
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Excellent for financial analysis
}
},
routing: {
strategy: "cost",
// Cost-sensitive for high-volume financial operations
fallback: true
},
features: {
security: {
encryption: true,
auditLogging: true,
apiKeyRotation: true
},
rateLimiting: {
enabled: true,
requests: 2e3,
windowMs: 36e5
},
caching: {
enabled: true,
ttl: 300,
// 5 minutes for market data
storage: "redis"
}
}
},
components: [
"MarketAnalyzer",
"RiskAssessment",
"PortfolioOptimizer",
"FraudDetection",
"CustomerServiceBot",
"RegulatorReporting"
]
};
// Gaming: speed-based routing with load balancing, very short in-memory
// cache, and the highest request ceiling of all presets.
GAMING_PRESET = {
name: "Gaming Platform AI",
description: "Dynamic AI for gaming: NPCs, procedural content, player assistance, and community management",
config: {
gatewayUrl: "",
apiKey: "",
preset: "custom",
providers: {
openai: {
capabilities: ["text.chat", "image.gen"],
priority: 1
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Great for creative game narratives
},
gemini: {
capabilities: ["video.gen"],
priority: 1
// For game trailers and cutscenes
}
},
routing: {
strategy: "speed",
// Low latency critical for gaming
fallback: true,
loadBalancing: true
},
features: {
caching: {
enabled: true,
ttl: 60,
// 1 minute for dynamic game content
storage: "memory"
},
rateLimiting: {
enabled: true,
requests: 5e3,
windowMs: 36e5
}
}
},
components: [
"DynamicNPC",
"QuestGenerator",
"GameMaster",
"PlayerHelper",
"CommunityModerator",
"ContentGenerator"
]
};
// Customer service: speed-routed, Anthropic-first chat, long FAQ caching and
// a 10k/hour rate limit.
CUSTOMER_SERVICE_PRESET = {
name: "Customer Service Platform",
description: "Intelligent customer service with multilingual support, sentiment analysis, and escalation management",
config: {
gatewayUrl: "",
apiKey: "",
preset: "chatbot",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings"],
priority: 2
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Better for empathetic customer service
}
},
routing: {
strategy: "speed",
// Fast response times for customer service
fallback: true
},
features: {
caching: {
enabled: true,
ttl: 1800,
// 30 minutes for FAQ responses
storage: "redis"
},
rateLimiting: {
enabled: true,
requests: 1e4,
windowMs: 36e5
},
monitoring: {
enabled: true,
metrics: ["latency", "usage", "errors"],
webhooks: []
// For escalation alerts
}
}
},
components: [
"CustomerServiceBot",
"SentimentAnalyzer",
"EscalationManager",
"KnowledgeBase",
"TicketClassifier",
"SatisfactionSurvey"
]
};
// Developer tooling: quality-routed chat with one-hour snippet caching and
// full metric monitoring; no rate-limit or security blocks.
DEVELOPER_PRESET = {
name: "Developer Tools AI",
description: "AI-powered development assistance with code generation, review, testing, and documentation",
config: {
gatewayUrl: "",
apiKey: "",
preset: "developer",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings"],
priority: 1
// Excellent for code generation
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Great for code review and explanations
}
},
routing: {
strategy: "quality",
fallback: true
},
features: {
caching: {
enabled: true,
ttl: 3600,
// 1 hour for code snippets
storage: "redis"
},
monitoring: {
enabled: true,
metrics: ["usage", "latency", "cost", "errors"]
}
}
},
components: [
"CodeGenerator",
"CodeReviewer",
"TestGenerator",
"DocumentationWriter",
"BugHunter",
"PerformanceAnalyzer"
]
};
// Research: quality-routed chat with the longest cache TTL (2h) of all presets.
RESEARCH_PRESET = {
name: "Research Platform AI",
description: "Academic and scientific research assistant with literature review, data analysis, and hypothesis generation",
config: {
gatewayUrl: "",
apiKey: "",
preset: "analyst",
providers: {
openai: {
capabilities: ["text.chat", "text.embeddings"],
priority: 1
},
anthropic: {
capabilities: ["text.chat"],
priority: 1
// Excellent for research analysis
}
},
routing: {
strategy: "quality",
fallback: true
},
features: {
caching: {
enabled: true,
ttl: 7200,
// 2 hours for research data
storage: "redis"
},
monitoring: {
enabled: true,
metrics: ["usage", "cost", "quality"]
}
}
},
components: [
"LiteratureReviewer",
"DataAnalyzer",
"HypothesisGenerator",
"CitationManager",
"ResearchPlanner",
"PeerReviewAssistant"
]
};
// Registry mapping the quick-start project-type keys (used by quickStartAI
// and getPreset) to the preset objects above.
PROJECT_PRESETS = {
"ecommerce": ECOMMERCE_PRESET,
"content-creator": CONTENT_CREATOR_PRESET,
"education": EDUCATIONAL_PRESET,
"healthcare": HEALTHCARE_PRESET,
"financial": FINANCIAL_PRESET,
"gaming": GAMING_PRESET,
"customer-service": CUSTOMER_SERVICE_PRESET,
"developer": DEVELOPER_PRESET,
"research": RESEARCH_PRESET
};
}
});
// src/server/quickStartAI.ts
// Server-side quick-start helpers, re-exported as live getters.
var quickStartAI_exports = {};
__export(quickStartAI_exports, {
quickStartAI: () => quickStartAI,
simpleAnalysis: () => simpleAnalysis,
simpleChat: () => simpleChat,
simpleVideoGeneration: () => simpleVideoGeneration
});
/**
 * One-call server-side setup: resolve a project preset (or build a custom
 * config), apply environment defaults and quick feature toggles, optionally
 * register a custom provider, then return a ready ServerAIHub.
 * @throws when no API key is supplied.
 */
function quickStartAI(options) {
  if (!options.apiKey) {
    throw new Error("API key is required. Get one from https://platform.openai.com or your AI provider.");
  }
  const gatewayUrl = options.gatewayUrl || process.env.NEXT_PUBLIC_AI_GATEWAY_URL || "https://ai-gateway.digilogiclabs.com";
  const projectType = options.projectType || "custom";
  const environment = options.environment || "development";
  const usePreset = projectType !== "custom" && PROJECT_PRESETS[projectType];
  let config;
  if (usePreset) {
    config = getPreset(projectType, { gatewayUrl, apiKey: options.apiKey }).config;
    console.log(`\u{1F680} Quick Start AI (Server): Configured for ${projectType} project`);
  } else {
    config = createCustomConfig(options);
    console.log("\u{1F680} Quick Start AI (Server): Using custom configuration");
  }
  applyEnvironmentSettings(config, environment);
  applyQuickToggles(config, options);
  const custom = options.customProvider;
  if (custom) {
    // NOTE(review): custom providers are nested under providers.custom[name],
    // unlike built-ins which sit directly on providers (providers.openai,
    // ...) — confirm the gateway expects this shape.
    config.providers = config.providers || {};
    config.providers.custom = config.providers.custom || {};
    config.providers.custom[custom.name] = {
      name: custom.name,
      type: "custom",
      endpoint: custom.endpoint,
      apiKey: custom.apiKey,
      capabilities: custom.capabilities,
      priority: 1
    };
  }
  const hub = createAIHub(config);
  console.log("\u2705 Server AI Hub ready! Available methods: chat(), generateVideo(), generateAudio()");
  return hub;
}
/**
 * One-call, single-turn chat helper: builds a hub from the quick-start
 * options and sends one user message.
 */
async function simpleChat(options) {
  const { message, apiKey, ...quickStartOptions } = options;
  if (!message) {
    throw new Error("Message parameter is required");
  }
  const hub = quickStartAI({ apiKey, ...quickStartOptions });
  const response = await hub.chat([{ role: "user", content: message }]);
  if (typeof response === "string") {
    return response;
  }
  // Non-string (e.g. streaming generator) is not expected here.
  return "Sorry, I encountered an issue.";
}
/**
 * One-call video job submission; resolves with the gateway job id.
 */
async function simpleVideoGeneration(options) {
  const { prompt, apiKey, ...quickStartOptions } = options;
  if (!prompt) {
    throw new Error("Prompt parameter is required");
  }
  const hub = quickStartAI({ apiKey, ...quickStartOptions });
  const { jobId } = await hub.generateVideo(prompt);
  return jobId;
}
/**
 * One-call content analysis; resolves with the normalized analysis result.
 */
async function simpleAnalysis(options) {
  const { content, analysisType, apiKey, ...quickStartOptions } = options;
  if (!content || !analysisType) {
    throw new Error("Content and analysisType parameters are required");
  }
  const hub = quickStartAI({ apiKey, ...quickStartOptions });
  const result = await hub.analyze(content, analysisType);
  return result;
}
/**
 * Build a minimal hub config from quick-start options (no preset).
 * Providers are selected from `options.capabilities` ("chat", "embeddings",
 * "video"); `options.primaryProvider` gets priority 1, all others priority 2.
 * All feature flags start disabled (applyEnvironmentSettings/applyQuickToggles
 * adjust them afterwards).
 */
function createCustomConfig(options) {
  const primaryProvider = options.primaryProvider || "openai";
  const capabilities = options.capabilities || ["chat"];
  const priorityFor = (provider) => provider === primaryProvider ? 1 : 2;
  const providers = {};
  const wantsText = capabilities.includes("chat") || capabilities.includes("embeddings");
  if (wantsText) {
    providers.openai = {
      capabilities: ["text.chat", "text.embeddings", "image.gen"],
      priority: priorityFor("openai")
    };
    providers.anthropic = {
      capabilities: ["text.chat"],
      priority: priorityFor("anthropic")
    };
  }
  if (capabilities.includes("video")) {
    providers.gemini = {
      capabilities: ["video.gen", "text.chat"],
      priority: priorityFor("gemini")
    };
  }
  return {
    gatewayUrl: options.gatewayUrl || "https://ai-gateway.digilogiclabs.com",
    apiKey: options.apiKey,
    providers,
    routing: {
      strategy: "quality",
      fallback: true,
      loadBalancing: false
    },
    features: {
      caching: { enabled: false },
      rateLimiting: { enabled: false },
      monitoring: { enabled: false }
    }
  };
}
/**
 * Overwrite the monitoring and rate-limit feature blocks with environment
 * defaults; production additionally enables the security block. Mutates
 * `config` in place.
 */
function applyEnvironmentSettings(config, environment) {
  const isProduction = environment === "production";
  config.features.monitoring = {
    enabled: true,
    metrics: isProduction ? ["latency", "cost", "errors", "usage"] : ["latency", "errors"]
  };
  config.features.rateLimiting = {
    enabled: true,
    requests: isProduction ? 1e3 : 100,
    windowMs: 36e5
  };
  if (isProduction) {
    config.features.security = { encryption: true, auditLogging: true };
  }
}
/**
 * Apply the explicit enableCaching / enableMonitoring / enableRateLimit
 * toggles from quick-start options. Toggles left undefined are untouched.
 * Mutates `config` in place.
 */
function applyQuickToggles(config, options) {
  if (options.enableCaching !== undefined) {
    config.features.caching = {
      enabled: options.enableCaching,
      ttl: 300,
      storage: "memory"
    };
  }
  if (options.enableMonitoring !== undefined) {
    const metrics = options.enableMonitoring ? ["latency", "cost", "usage"] : [];
    config.features.monitoring = { enabled: options.enableMonitoring, metrics };
  }
  if (options.enableRateLimit !== undefined) {
    config.features.rateLimiting = {
      enabled: options.enableRateLimit,
      requests: 1e3,
      windowMs: 36e5
    };
  }
}
// Module initializer: pulls in the hub factory and preset registry on first use.
var init_quickStartAI = __esm({
"src/server/quickStartAI.ts"() {
"use strict";
init_aiUtils();
init_ProjectPresets();
}
});
// src/client/AIProvider.tsx
// Client module exports: the React context provider and its accessor hook.
var AIProvider_exports = {};
__export(AIProvider_exports, {
AIProvider: () => AIProvider,
useAI: () => useAI
});
/**
 * React context provider exposing browser-side AI gateway helpers
 * (chat / generateVideo / generateAudio) plus shared loading/error state.
 * Config resolution order: explicit `config` prop, else `apiKey`/`gatewayUrl`
 * props, else NEXT_PUBLIC_* environment variables.
 */
function AIProvider({
  children,
  config,
  apiKey,
  gatewayUrl = process.env.NEXT_PUBLIC_AI_GATEWAY_URL || "https://ai-gateway.digilogiclabs.com"
}) {
  const [isLoading, setIsLoading] = (0, import_react.useState)(false);
  const [error, setError] = (0, import_react.useState)(null);
  // Memoized: the previous version rebuilt this object on every render,
  // which changed the identity of every useCallback below and forced all
  // context consumers to re-render whenever the provider rendered.
  const effectiveConfig = (0, import_react.useMemo)(() => config || {
    apiKey: apiKey || process.env.NEXT_PUBLIC_AI_API_KEY || "",
    gatewayUrl,
    environment: "development"
  }, [config, apiKey, gatewayUrl]);
  if (!effectiveConfig.apiKey) {
    console.warn("AIProvider: No API key provided. Some features may not work.");
  }
  // POST /v1/chat. Returns the reply text, or an async generator of content
  // chunks when options.stream is set.
  const chat = (0, import_react.useCallback)(async (messages, options) => {
    if (!effectiveConfig.apiKey) {
      throw new Error("API key is required for chat functionality");
    }
    setIsLoading(true);
    setError(null);
    try {
      const response = await fetch(`${effectiveConfig.gatewayUrl}/v1/chat`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${effectiveConfig.apiKey}`
        },
        body: JSON.stringify({ messages, ...options })
      });
      if (!response.ok) {
        throw new Error(`Chat request failed: ${response.statusText}`);
      }
      if (options?.stream) {
        return handleStreamingResponse(response);
      }
      const result = await response.json();
      return result.content || result.message || "No response received";
    } catch (err) {
      const errorMessage = err instanceof Error ? err.message : "Chat failed";
      setError(errorMessage);
      throw new Error(errorMessage);
    } finally {
      setIsLoading(false);
    }
  }, [effectiveConfig]);
  // POST /v1/video/generate. Resolves with { jobId, status, estimatedTimeMs }.
  const generateVideo = (0, import_react.useCallback)(async (prompt, options) => {
    if (!effectiveConfig.apiKey) {
      throw new Error("API key is required for video generation");
    }
    setIsLoading(true);
    setError(null);
    try {
      const response = await fetch(`${effectiveConfig.gatewayUrl}/v1/video/generate`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${effectiveConfig.apiKey}`
        },
        body: JSON.stringify({ prompt, ...options })
      });
      if (!response.ok) {
        throw new Error(`Video generation failed: ${response.statusText}`);
      }
      const result = await response.json();
      return {
        jobId: result.jobId,
        status: "queued",
        estimatedTimeMs: result.estimatedTimeMs
      };
    } catch (err) {
      const errorMessage = err instanceof Error ? err.message : "Video generation failed";
      setError(errorMessage);
      throw new Error(errorMessage);
    } finally {
      setIsLoading(false);
    }
  }, [effectiveConfig]);
  // POST /v1/audio/generate. Resolves with { jobId, status, estimatedTimeMs }.
  const generateAudio = (0, import_react.useCallback)(async (prompt, options) => {
    if (!effectiveConfig.apiKey) {
      throw new Error("API key is required for audio generation");
    }
    setIsLoading(true);
    setError(null);
    try {
      const response = await fetch(`${effectiveConfig.gatewayUrl}/v1/audio/generate`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${effectiveConfig.apiKey}`
        },
        body: JSON.stringify({ prompt, ...options })
      });
      if (!response.ok) {
        throw new Error(`Audio generation failed: ${response.statusText}`);
      }
      const result = await response.json();
      return {
        jobId: result.jobId,
        status: "queued",
        estimatedTimeMs: result.estimatedTimeMs
      };
    } catch (err) {
      const errorMessage = err instanceof Error ? err.message : "Audio generation failed";
      setError(errorMessage);
      throw new Error(errorMessage);
    } finally {
      setIsLoading(false);
    }
  }, [effectiveConfig]);
  const clearError = (0, import_react.useCallback)(() => {
    setError(null);
  }, []);
  // Stable context value: consumers re-render only when state actually changes.
  const contextValue = (0, import_react.useMemo)(() => ({
    config: effectiveConfig,
    isLoading,
    error,
    chat,
    generateVideo,
    generateAudio,
    clearError
  }), [effectiveConfig, isLoading, error, chat, generateVideo, generateAudio, clearError]);
  return /* @__PURE__ */ (0, import_jsx_runtime.jsx)(AIContext.Provider, { value: contextValue, children });
}
/**
 * Access the AI context. Throws when called outside an <AIProvider>.
 */
function useAI() {
  const context = (0, import_react.useContext)(AIContext);
  if (context) {
    return context;
  }
  throw new Error(
    "useAI must be used within an AIProvider. Make sure to wrap your component tree with <AIProvider>."
  );
}
/**
 * Turn an SSE (`data: ...`) response body into an async generator of content
 * strings. Fixes two defects in the previous version: UTF-8 is now decoded
 * in streaming mode so multi-byte characters split across network chunks
 * survive, and partial lines are buffered across reads (a `data:` frame that
 * straddled a chunk boundary was previously dropped).
 */
async function* handleStreamingResponse(response) {
  const reader = response.body?.getReader();
  const decoder = new TextDecoder();
  if (!reader)
    throw new Error("No response body for streaming");
  // Returns the content string for a frame, null for the [DONE] sentinel,
  // or undefined for lines to ignore.
  const extractContent = (line) => {
    if (!line.startsWith("data: "))
      return void 0;
    const data = line.slice(6);
    if (data === "[DONE]")
      return null;
    try {
      const parsed = JSON.parse(data);
      if (parsed.content)
        return parsed.content;
    } catch (e) {
      // Ignore malformed or keep-alive frames (matches prior behavior).
    }
    return void 0;
  };
  let buffer = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done)
        break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      buffer = lines.pop() ?? ""; // retain the trailing partial line
      for (const line of lines) {
        const content = extractContent(line);
        if (content === null)
          return;
        if (content !== void 0)
          yield content;
      }
    }
    // Flush a final frame that arrived without a trailing newline.
    buffer += decoder.decode();
    if (buffer) {
      const content = extractContent(buffer);
      if (content !== null && content !== void 0)
        yield content;
    }
  } finally {
    reader.releaseLock();
  }
}
var import_react, import_jsx_runtime, AIContext;
// Module initializer: loads React lazily and creates the shared context.
// The "use client" directive is preserved from the source module for
// Next.js App Router client-component detection.
var init_AIProvider = __esm({
"src/client/AIProvider.tsx"() {
"use strict";
"use client";
import_react = require("react");
import_jsx_runtime = require("react/jsx-runtime");
AIContext = (0, import_react.createContext)(null);
}
});
// src/client/hooks.ts
// Client hook module exports (live getter re-exports).
var hooks_exports = {};
__export(hooks_exports, {
useChat: () => useChat,
useEmbeddings: () => useEmbeddings,
useGenerateAudio: () => useGenerateAudio,
useGenerateVideo: () => useGenerateVideo,
useJobStatus: () => useJobStatus
});
/**
 * Stateful chat hook: keeps the message history locally and appends the
 * assistant reply (incrementally when streaming) after each send. On failure
 * the history is rolled back to its pre-send state.
 */
function useChat(initialMessages = []) {
const { chat, isLoading: globalLoading, error: globalError } = useAI();
const [messages, setMessages] = (0, import_react2.useState)(initialMessages);
const [isLoading, setIsLoading] = (0, import_react2.useState)(false);
const [error, setError] = (0, import_react2.useState)(null);
// NOTE(review): `messages` is captured from the render closure, so two
// overlapping sendMessage calls can drop each other's history — confirm
// whether concurrent sends should use functional setMessages updates.
const sendMessage = (0, import_react2.useCallback)(async (content, options) => {
const userMessage = { role: "user", content };
const newMessages = [...messages, userMessage];
setMessages(newMessages);
setIsLoading(true);
setError(null);
try {
const response = await chat(newMessages, options);
if (options?.stream) {
// Streaming: append an empty assistant message, then grow its content
// chunk by chunk as the generator yields.
let assistantContent = "";
const assistantMessage = { role: "assistant", content: "" };
setMessages([...newMessages, assistantMessage]);
for await (const chunk of response) {
assistantContent += chunk;
setMessages((prev) => {
const updated = [...prev];
updated[updated.length - 1] = { role: "assistant", content: assistantContent };
return updated;
});
}
} else {
const assistantMessage = {
role: "assistant",
content: response
};
setMessages([...newMessages, assistantMessage]);
}
} catch (err) {
const error2 = err instanceof Error ? err : new Error("Chat failed");
setError(error2);
// Roll back to the pre-send history (drops the optimistic user message).
setMessages(messages);
} finally {
setIsLoading(false);
}
}, [chat, messages]);
const clearMessages = (0, import_react2.useCallback)(() => {
setMessages([]);
setError(null);
}, []);
return {
messages,
// Local state is combined with the provider-level loading/error state;
// the provider's error string is wrapped into an Error for a uniform type.
isLoading: isLoading || globalLoading,
error: error || (globalError ? new Error(globalError) : null),
sendMessage,
clearMessages
};
}
/**
 * Hook wrapping AIProvider.generateAudio with local loading/error state.
 * `generate` resolves with the queued job descriptor and rethrows failures
 * after recording them in `error`.
 */
function useGenerateAudio() {
  const { generateAudio, isLoading: globalLoading, error: globalError } = useAI();
  const [isLoading, setIsLoading] = (0, import_react2.useState)(false);
  const [error, setError] = (0, import_react2.useState)(null);
  const generate = (0, import_react2.useCallback)(async (prompt, options) => {
    setIsLoading(true);
    setError(null);
    try {
      return await generateAudio(prompt, options);
    } catch (err) {
      const wrapped = err instanceof Error ? err : new Error("Audio generation failed");
      setError(wrapped);
      throw wrapped;
    } finally {
      setIsLoading(false);
    }
  }, [generateAudio]);
  const clearError = (0, import_react2.useCallback)(() => setError(null), []);
  return {
    generate,
    isLoading: isLoading || globalLoading,
    error: error || (globalError ? new Error(globalError) : null),
    clearError
  };
}
/**
 * Hook wrapping AIProvider.generateVideo with local loading/error state.
 * `generate` resolves with the queued job descriptor and rethrows failures
 * after recording them in `error`.
 */
function useGenerateVideo() {
  const { generateVideo, isLoading: globalLoading, error: globalError } = useAI();
  const [isLoading, setIsLoading] = (0, import_react2.useState)(false);
  const [error, setError] = (0, import_react2.useState)(null);
  const generate = (0, import_react2.useCallback)(async (prompt, options) => {
    setIsLoading(true);
    setError(null);
    try {
      return await generateVideo(prompt, options);
    } catch (err) {
      const wrapped = err instanceof Error ? err : new Error("Video generation failed");
      setError(wrapped);
      throw wrapped;
    } finally {
      setIsLoading(false);
    }
  }, [generateVideo]);
  const clearError = (0, import_react2.useCallback)(() => setError(null), []);
  return {
    generate,
    isLoading: isLoading || globalLoading,
    error: error || (globalError ? new Error(globalError) : null),
    clearError
  };
}
/**
 * Poll the gateway for an async job's status (GET /v1/jobs/:id). Polling
 * runs only while `enabled` and the job reports queued/running; `refetch`
 * forces an immediate fetch.
 */
function useJobStatus(jobId, options = {}) {
const { config } = useAI();
const { pollingInterval = 2e3, enabled = true } = options;
const [status, setStatus] = (0, import_react2.useState)(null);
const [isLoading, setIsLoading] = (0, import_react2.useState)(false);
const [error, setError] = (0, import_react2.useState)(null);
const fetchStatus = (0, import_react2.useCallback)(async () => {
if (!jobId || !config.apiKey)
return;
setIsLoading(true);
setError(null);
try {
const response = await fetch(`${config.gatewayUrl}/v1/jobs/${jobId}`, {
headers: {
"Authorization": `Bearer ${config.apiKey}`
}
});
if (!response.ok) {
throw new Error(`Failed to fetch job status: ${response.statusText}`);
}
const jobStatus = await response.json();
setStatus(jobStatus);
} catch (err) {
setError(err instanceof Error ? err : new Error("Failed to fetch job status"));
} finally {
setIsLoading(false);
}
}, [jobId, config]);
(0, import_react2.useEffect)(() => {
if (!jobId || !enabled)
return;
// NOTE(review): `status` inside poll() is the value captured when this
// effect ran; the dependency on status?.status restarts the effect after
// each status change, which is what keeps the polling loop alive —
// confirm this does not double-schedule when a fetch outlives
// pollingInterval.
let intervalId;
let mounted = true;
const poll = async () => {
await fetchStatus();
if (mounted && status && (status.status === "queued" || status.status === "running")) {
// Despite the name, this holds a setTimeout id (cleared via clearTimeout).
intervalId = setTimeout(poll, pollingInterval);
}
};
poll();
return () => {
mounted = false;
if (intervalId) {
clearTimeout(intervalId);
}
};
}, [jobId, enabled, pollingInterval, fetchStatus, status?.status]);
return {
status,
isLoading,
error,
refetch: fetchStatus,
// Terminal gateway states are "done" and "error".
isComplete: status?.status === "done" || status?.status === "error",
isDone: status?.status === "done",
isError: status?.status === "error"
};
}
/**
 * Hook returning a `generate(texts)` function that POSTs to /v1/embeddings
 * and resolves with the parsed JSON response.
 */
function useEmbeddings() {
  const { config } = useAI();
  const generate = (0, import_react2.useCallback)(async (texts) => {
    if (!config.apiKey) {
      throw new Error("API key is required for embeddings");
    }
    const requestInit = {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${config.apiKey}`
      },
      body: JSON.stringify({ texts })
    };
    const response = await fetch(`${config.gatewayUrl}/v1/embeddings`, requestInit);
    if (!response.ok) {
      throw new Error(`Embeddings request failed: ${response.statusText}`);
    }
    return response.json();
  }, [config]);
  return { generate };
}
var import_react2;
// Module initializer: loads React and the AIProvider module on first use.
var init_hooks = __esm({
"src/client/hooks.ts"() {
"use strict";
"use client";
import_react2 = require("react");
init_AIProvider();
}
});
// src/client/clientUtils.ts
// Browser-safe utility exports (live getter re-exports).
var clientUtils_exports = {};
__export(clientUtils_exports, {
clientStorage: () => clientStorage,
debounce: () => debounce,
formatErrorMessage: () => formatErrorMessage,
getAIConfigFromEnv: () => getAIConfigFromEnv,
isClientSide: () => isClientSide,
validateAIConfig: () => validateAIConfig
});
/**
 * Validate a client AI config. Returns { isValid, errors } where `errors`
 * lists every missing or invalid field (empty when valid).
 */
function validateAIConfig(config) {
  const errors = [];
  if (!config.apiKey) {
    errors.push("API key is required");
  }
  if (!config.gatewayUrl) {
    errors.push("Gateway URL is required");
  } else if (!isValidUrl(config.gatewayUrl)) {
    errors.push("Gateway URL must be a valid URL");
  }
  return {
    isValid: errors.length === 0,
    errors
  };
}
/**
 * Build an AI client configuration from environment variables.
 *
 * NOTE: the full `process.env.NEXT_PUBLIC_*` member expressions are kept
 * intact (not aliased) so bundlers that statically inline them still can.
 *
 * @returns {{apiKey: string, gatewayUrl: string, environment: string}}
 */
function getAIConfigFromEnv() {
  const apiKey = process.env.NEXT_PUBLIC_AI_API_KEY || "";
  const gatewayUrl = process.env.NEXT_PUBLIC_AI_GATEWAY_URL || "https://ai-gateway.digilogiclabs.com";
  // Anything other than "production" (including undefined) maps to "development".
  const isProduction = process.env.NODE_ENV === "production";
  return {
    apiKey,
    gatewayUrl,
    environment: isProduction ? "production" : "development"
  };
}
/**
 * Whether the code is running in a browser environment.
 * `typeof` is safe even when `window` was never declared (Node, workers).
 * @returns {boolean} true in the browser, false on the server
 */
function isClientSide() {
  const hasWindow = typeof window !== "undefined";
  return hasWindow;
}
/**
 * Normalize an arbitrary thrown value to a human-readable message.
 * (Check order vs. the Error check is interchangeable: an Error instance is
 * never a string, so behavior is identical.)
 *
 * @param {unknown} error - anything that was thrown or rejected
 * @returns {string} the error's message, the string itself, or a fallback
 */
function formatErrorMessage(error) {
  if (typeof error === "string") {
    return error;
  }
  if (error instanceof Error) {
    return error.message;
  }
  return "An unknown error occurred";
}
/**
 * Return a debounced wrapper around `func`: each call resets a timer, and
 * `func` runs only after `wait` ms have elapsed without another call, with
 * the last call's arguments.
 *
 * Fixes the original arrow-function wrapper, which discarded the call-site
 * `this`, silently breaking debounced object methods. Also exposes a
 * backward-compatible `.cancel()` to drop a pending invocation.
 *
 * @param {Function} func - function to debounce
 * @param {number} wait - quiet period in milliseconds
 * @returns {Function} debounced function with a `.cancel()` method
 */
function debounce(func, wait) {
  let timeout;
  const debounced = function(...args) {
    clearTimeout(timeout);
    // Arrow callback keeps the `this` bound at the debounced call site.
    timeout = setTimeout(() => func.apply(this, args), wait);
  };
  debounced.cancel = () => clearTimeout(timeout);
  return debounced;
}
/**
 * Check whether a string parses as an absolute URL.
 * Relies on the WHATWG URL constructor, which throws a TypeError on any
 * string it cannot parse (including relative paths and the empty string).
 *
 * @param {string} string - candidate URL
 * @returns {boolean} true if `new URL(string)` succeeds
 */
function isValidUrl(string) {
  let parsed = false;
  try {
    new URL(string);
    parsed = true;
  } catch {
    // leave `parsed` false — the string is not a valid absolute URL
  }
  return parsed;
}
// Lazy module shim for src/client/clientUtils.ts (esbuild __esm pattern):
// the body runs once, on the first init_clientUtils() call.
var clientStorage;
var init_clientUtils = __esm({
  "src/client/clientUtils.ts"() {
    "use strict";
    "use client";
    // SSR-safe localStorage facade: on the server every operation is a no-op
    // (get returns null), and browser storage exceptions (quota exceeded,
    // privacy mode) are deliberately swallowed — writes are best-effort.
    clientStorage = {
      get(key) {
        if (!isClientSide()) {
          return null;
        }
        try {
          return localStorage.getItem(key);
        } catch {
          return null;
        }
      },
      set(key, value) {
        if (!isClientSide()) {
          return;
        }
        try {
          localStorage.setItem(key, value);
        } catch {
          // ignore storage failures (best-effort write)
        }
      },
      remove(key) {
        if (!isClientSide()) {
          return;
        }
        try {
          localStorage.removeItem(key);
        } catch {
          // ignore storage failures (best-effort removal)
        }
      }
    };
  }
});
// src/nextjs15-index.ts
// Entry-point export table. __export installs each public name as a live
// getter, so the values assigned further down this file (after the
// server/client split below) are resolved lazily on first access.
var nextjs15_index_exports = {};
__export(nextjs15_index_exports, {
AICapability: () => AICapability,
AIProvider: () => AIProvider2,
clientStorage: () => clientStorage2,
createAIHub: () => createAIHub2,
getAIConfigFromEnv: () => getAIConfigFromEnv2,
isClientSide: () => isClientSide2,
isServerSide: () => isServerSide,
quickStartAI: () => quickStartAI2,
simpleAnalysis: () => simpleAnalysis2,
simpleChat: () => simpleChat2,
simpleVideoGeneration: () => simpleVideoGeneration2,
useAI: () => useAI2,
useChat: () => useChat2,
useEmbeddings: () => useEmbeddings2,
useGenerateAudio: () => useGenerateAudio2,
useGenerateVideo: () => useGenerateVideo2,
useJobStatus: () => useJobStatus2,
validateAIConfig: () => validateAIConfig2
});
module.exports = __toCommonJS(nextjs15_index_exports);
// src/shared/index.ts
// init_types() is invoked twice — presumably generated from two re-exports of
// shared/types in src/shared/index.ts; the __esm wrapper makes the second
// call a cached no-op, so this is harmless.
init_types();
init_types();
// src/nextjs15-index.ts
// Server/client split for Next.js 15 RSC support: server-only helpers are
// initialized only when `window` is absent, client hooks/utilities only when
// it is present; the other side's names resolve to undefined. Each
// `(init_x(), __toCommonJS(x_exports))` comma expression runs the lazy module
// initializer for its side effects, then reads the named export off it.
var isServer = typeof window === "undefined";
var quickStartAI2 = isServer ? (init_quickStartAI(), __toCommonJS(quickStartAI_exports)).quickStartAI : void 0;
var simpleChat2 = isServer ? (init_quickStartAI(), __toCommonJS(quickStartAI_exports)).simpleChat : void 0;
var simpleVideoGeneration2 = isServer ? (init_quickStartAI(), __toCommonJS(quickStartAI_exports)).simpleVideoGeneration : void 0;
var simpleAnalysis2 = isServer ? (init_quickStartAI(), __toCommonJS(quickStartAI_exports)).simpleAnalysis : void 0;
var createAIHub2 = isServer ? (init_aiUtils(), __toCommonJS(aiUtils_exports)).createAIHub : void 0;
var AIProvider2 = !isServer ? (init_AIProvider(), __toCommonJS(AIProvider_exports)).AIProvider : void 0;
var useAI2 = !isServer ? (init_AIProvider(), __toCommonJS(AIProvider_exports)).useAI : void 0;
var useChat2 = !isServer ? (init_hooks(), __toCommonJS(hooks_exports)).useChat : void 0;
var useGenerateAudio2 = !isServer ? (init_hooks(), __toCommonJS(hooks_exports)).useGenerateAudio : void 0;
var useGenerateVideo2 = !isServer ? (init_hooks(), __toCommonJS(hooks_exports)).useGenerateVideo : void 0;
var useJobStatus2 = !isServer ? (init_hooks(), __toCommonJS(hooks_exports)).useJobStatus : void 0;
var useEmbeddings2 = !isServer ? (init_hooks(), __toCommonJS(hooks_exports)).useEmbeddings : void 0;
var validateAIConfig2 = !isServer ? (init_clientUtils(), __toCommonJS(clientUtils_exports)).validateAIConfig : void 0;
var getAIConfigFromEnv2 = !isServer ? (init_clientUtils(), __toCommonJS(clientUtils_exports)).getAIConfigFromEnv : void 0;
var clientStorage2 = !isServer ? (init_clientUtils(), __toCommonJS(clientUtils_exports)).clientStorage : void 0;
var isServerSide = isServer;
var isClientSide2 = !isServer;
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design: `0 && (...)` never evaluates at runtime; the literal
// object presumably exists so Node's CJS named-export detection can
// statically discover these names for `import { x } from` interop.)
0 && (module.exports = {
AICapability,
AIProvider,
clientStorage,
createAIHub,
getAIConfigFromEnv,
isClientSide,
isServerSide,
quickStartAI,
simpleAnalysis,
simpleChat,
simpleVideoGeneration,
useAI,
useChat,
useEmbeddings,
useGenerateAudio,
useGenerateVideo,
useJobStatus,
validateAIConfig
});
;