// @makolabs/ripple — Simple Svelte 5 powered component library ✨
import { browser } from '$app/environment';
import { AIConfigurationError, AINetworkError, AIRateLimitError } from './types.js';
/**
* OpenAI adapter with simple sliding window memory
* Handles communication with OpenAI's GPT models with AI-generated summaries
*/
/**
 * OpenAI adapter with a simple sliding-window memory.
 *
 * Talks to OpenAI's Responses API (`POST {baseUrl}/responses`) and keeps a
 * short rolling history of user/assistant turns, persisted to localStorage.
 * Once the history reaches `maxMessages`, older turns are condensed into an
 * AI-generated summary that is folded into the system prompt of later requests.
 */
export class OpenAIAdapter {
    /** Effective configuration (defaults merged with constructor overrides). */
    config;
    /** Rolling window of recent user/assistant messages. */
    messages = [];
    /** AI-generated summary of older, truncated conversation. */
    summary = '';
    /** Lightweight extracted user facts (currently only `name`). */
    userProfile = {};
    /** History length that triggers summarize-and-truncate. */
    maxMessages = 45;
    /**
     * @param {object} config - Adapter options; `apiKey` is required. All other
     *   fields fall back to the defaults below.
     * @throws {AIConfigurationError} When no API key is supplied.
     */
    constructor(config) {
        // Set defaults; caller-supplied values win via the trailing spread.
        this.config = {
            model: 'gpt-5', // make this configurable
            baseUrl: 'https://api.openai.com/v1', // make this configurable
            systemPrompt: this.getDefaultSystemPrompt(),
            temperature: 0.7,
            maxTokens: 4000,
            maintainHistory: true,
            maxHistoryLength: 20,
            ...config
        };
        if (!this.config.apiKey) {
            throw new AIConfigurationError('OpenAI API key is required', this.getName());
        }
        // Restore any previously persisted conversation memory.
        this.loadFromStorage();
    }
    /** @returns {string} Human-readable adapter name. */
    getName() {
        return 'OpenAI';
    }
    /** @returns {boolean} True when an API key is set and we are in the browser. */
    isConfigured() {
        return !!this.config.apiKey && browser;
    }
    /** @returns {string} Base system prompt used when no custom prompt applies. */
    getDefaultSystemPrompt() {
        return `You are a helpful AI assistant. Be concise and accurate.`;
    }
    /** @returns {string} Formatting rules appended when the user asks about code. */
    getCodeRules() {
        return `
CODE RULES:
- ALL code must use \`\`\`language blocks
- Always specify language: python, javascript, bash, etc.
- Never write code without fences`;
    }
    /** @returns {string} Formatting rules appended when the user asks for diagrams. */
    getMermaidRules() {
        return `
MERMAID RULES:
- Use \`\`\`mermaid for all diagrams
- FLOWCHARTS: Use 'graph TB' or 'graph LR' (NOT 'flowchart TD')
- Node labels: Keep simple, avoid special chars like (){}[]<>
- NO multi-line text or <br/> tags in node labels
- Use underscores instead of spaces in node IDs
- Node shapes: [text] rectangles, {text} diamonds, (text) rounded, ((text)) circles
- Connections: --> for arrows, --- for lines
- Styling: Use 'style NodeID fill:#color' format
- Keep syntax minimal and parser-friendly
- Example:
\`\`\`mermaid
graph TB
A[Start] --> B[Process]
B --> C{Decision}
C -->|Yes| D[End]
\`\`\``;
    }
    /**
     * Build the system prompt, appending code/mermaid formatting rules only
     * when the user's message suggests they are relevant.
     * @param {string} userMessage - The incoming user message to inspect.
     * @returns {string} System prompt with any applicable rule sections.
     */
    buildConditionalSystemPrompt(userMessage) {
        let prompt = this.getDefaultSystemPrompt();
        // Check if user message contains code-related keywords
        const codeKeywords = /\b(code|function|class|variable|script|program|algorithm|syntax|debug|compile|execute|run|install|import|export|api|method|loop|condition|array|object|string|number|boolean|return|print|console|log|error|exception|try|catch|if|else|for|while|switch|case|break|continue|def|var|let|const|async|await|promise|callback|json|xml|html|css|javascript|python|java|cpp|csharp|php|ruby|go|rust|swift|kotlin|typescript|bash|shell|sql|yaml|dockerfile|markdown)\b/i;
        // Check if user message contains mermaid/diagram keywords
        const mermaidKeywords = /\b(mermaid|diagram|flowchart|chart|graph|flow|sequence|class|state|entity|relationship|gantt|pie|journey|gitgraph|mindmap|visualization|visualize|draw|show|create.*diagram|generate.*chart|make.*flow|workflow|process|architecture|schema|database|user.*journey|state.*machine|git.*flow|branch|timeline|roadmap)\b/i;
        // Evaluate each regex once instead of re-testing for the footer check.
        const wantsCode = codeKeywords.test(userMessage);
        const wantsMermaid = mermaidKeywords.test(userMessage);
        if (wantsCode) {
            prompt += this.getCodeRules();
        }
        if (wantsMermaid) {
            prompt += this.getMermaidRules();
        }
        // Add footer only if rules were added
        if (wantsCode || wantsMermaid) {
            prompt += `\n\nThese formatting rules are required for proper UI rendering.`;
        }
        return prompt;
    }
    /**
     * Send a single (non-streaming) message to the Responses API.
     * @param {string} message - The user's message.
     * @param {{ thinkingMode?: boolean }} [context] - `thinkingMode` requests
     *   reasoning summaries from the model.
     * @returns {Promise<object>} Chat response with `content` and, when
     *   available, `thinkingContent` (reasoning summary).
     * @throws {AIConfigurationError} When not configured.
     * @throws {AIRateLimitError} On HTTP 429.
     * @throws {AINetworkError} On any other API or transport failure.
     */
    async sendMessage(message, context) {
        if (!this.isConfigured()) {
            throw new AIConfigurationError('OpenAI adapter not properly configured', this.getName());
        }
        try {
            const thinkingMode = context?.thinkingMode;
            // Build context from summary + recent messages
            const memoryContext = this.buildMemoryContext();
            const enhancedSystemPrompt = this.buildEnhancedSystemPrompt(memoryContext, message);
            // Build messages array for OpenAI
            const messages = [
                { role: 'system', content: enhancedSystemPrompt },
                { role: 'user', content: message }
            ];
            // Reasoning options are only attached when thinking mode is on —
            // reasoning models have different parameter requirements.
            const requestBody = {
                model: this.config.model,
                input: messages,
                max_output_tokens: this.config.maxTokens,
                ...(thinkingMode && {
                    reasoning: {
                        effort: 'medium',
                        summary: 'auto'
                    }
                })
            };
            // Call OpenAI API directly
            const response = await fetch(`${this.config.baseUrl}/responses`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.config.apiKey}`
                },
                body: JSON.stringify(requestBody)
            });
            if (!response.ok) {
                if (response.status === 429) {
                    throw new AIRateLimitError('Rate limit exceeded', this.getName());
                }
                const errorData = await response.json().catch(() => ({}));
                throw new AINetworkError(`OpenAI API error: ${response.status} - ${errorData.error?.message || 'Unknown error'}`, this.getName());
            }
            const openAIResponse = await response.json();
            if (openAIResponse.error) {
                throw new AINetworkError(`OpenAI API error: ${openAIResponse.error.message}`, this.getName());
            }
            if (!openAIResponse.output || openAIResponse.output.length === 0) {
                throw new AINetworkError('No response from OpenAI', this.getName());
            }
            // The Responses API returns an array of typed output items; pull the
            // reasoning summary (if any) and the visible message text out of it.
            let aiResponseContent = '';
            let reasoningContent = '';
            for (const outputItem of openAIResponse.output) {
                if (outputItem.type === 'reasoning' && outputItem.summary) {
                    // Extract reasoning summary text
                    reasoningContent = outputItem.summary
                        .filter((s) => s.type === 'summary_text')
                        .map((s) => s.text)
                        .join('');
                }
                else if (outputItem.type === 'message' && outputItem.content) {
                    // Extract message content text
                    aiResponseContent = outputItem.content
                        .filter((c) => c.type === 'output_text')
                        .map((c) => c.text)
                        .join('');
                }
            }
            // `type` is always 'chat', so only empty content is actually invalid.
            if (!aiResponseContent) {
                throw new AINetworkError('Invalid response format', this.getName());
            }
            // Create chat response
            const chatResponse = {
                type: 'chat',
                content: aiResponseContent,
                thinkingContent: reasoningContent,
                isThinkingComplete: !!reasoningContent
            };
            // Add messages to memory with sliding window logic
            if (this.config.maintainHistory) {
                // BUGFIX: await the async memory update so summarization and
                // persistence finish before returning (the streaming path
                // already awaited this call; here the promise was left floating).
                await this.addToMemory(message, aiResponseContent, reasoningContent);
            }
            return chatResponse;
        }
        catch (error) {
            // Re-throw our own error types untouched; wrap everything else.
            if (error instanceof AIConfigurationError ||
                error instanceof AINetworkError ||
                error instanceof AIRateLimitError) {
                throw error;
            }
            console.error('OpenAI adapter error:', error);
            throw new AINetworkError(`Unexpected error: ${error instanceof Error ? error.message : 'Unknown error'}`, this.getName());
        }
    }
    /**
     * Send a message with server-sent-event streaming.
     * @param {string} message - The user's message.
     * @param {(chunk: object) => void} onStream - Callback invoked with each
     *   incremental chat response (content and/or reasoning deltas).
     * @param {{ thinkingMode?: boolean }} [context] - Optional flags.
     * @returns {Promise<object>} Final, complete chat response.
     * @throws {AIConfigurationError|AIRateLimitError|AINetworkError}
     */
    async sendMessageStream(message, onStream, context) {
        if (!this.isConfigured()) {
            throw new AIConfigurationError('OpenAI adapter not properly configured', this.getName());
        }
        try {
            const thinkingMode = context?.thinkingMode;
            // Build context from summary + recent messages
            const memoryContext = this.buildMemoryContext();
            const enhancedSystemPrompt = this.buildEnhancedSystemPrompt(memoryContext, message);
            // Build messages array for OpenAI
            const messages = [
                { role: 'system', content: enhancedSystemPrompt },
                { role: 'user', content: message }
            ];
            // Generate unique message ID for this streaming response
            const messageId = this.generateId();
            let fullContent = '';
            let fullReasoning = '';
            // Prepare streaming request body - reasoning models have different
            // parameter requirements.
            const requestBody = {
                model: this.config.model,
                input: messages,
                stream: true,
                max_output_tokens: this.config.maxTokens,
                ...(thinkingMode && {
                    reasoning: {
                        effort: 'medium',
                        summary: 'auto'
                    }
                })
            };
            // Call OpenAI API with streaming
            const response = await fetch(`${this.config.baseUrl}/responses`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.config.apiKey}`
                },
                body: JSON.stringify(requestBody)
            });
            if (!response.ok) {
                if (response.status === 429) {
                    throw new AIRateLimitError('Rate limit exceeded', this.getName());
                }
                const errorData = await response.json().catch(() => ({}));
                throw new AINetworkError(`OpenAI API error: ${response.status} - ${errorData.error?.message || 'Unknown error'}`, this.getName());
            }
            if (!response.body) {
                throw new AINetworkError('No response body from OpenAI', this.getName());
            }
            // Process the stream
            const reader = response.body.getReader();
            const decoder = new TextDecoder();
            try {
                // BUGFIX: carry partial SSE lines across network chunks. A JSON
                // payload can be split mid-line between reads; the old code split
                // each chunk independently and silently dropped the fragments.
                let buffer = '';
                let completed = false;
                while (!completed) {
                    const { done, value } = await reader.read();
                    if (done)
                        break;
                    buffer += decoder.decode(value, { stream: true });
                    const lines = buffer.split('\n');
                    buffer = lines.pop() ?? ''; // keep trailing partial line for the next read
                    for (const line of lines) {
                        // Skip "event: ..." lines and blanks; only data lines carry payloads.
                        if (!line.startsWith('data: '))
                            continue;
                        const data = line.slice(6);
                        let parsed;
                        try {
                            parsed = JSON.parse(data);
                        }
                        catch {
                            // Skip invalid JSON chunks (e.g. sentinel lines)
                            continue;
                        }
                        // Handle reasoning summary text deltas
                        if (parsed.type === 'response.reasoning_summary_text.delta' && parsed.delta) {
                            fullReasoning += parsed.delta;
                            // Send reasoning update
                            onStream({
                                type: 'chat',
                                content: fullContent,
                                messageId,
                                thinkingContent: fullReasoning,
                                isThinkingComplete: false,
                                isStreaming: true,
                                isStreamEnd: false
                            });
                        }
                        // Handle output text deltas (main content)
                        else if (parsed.type === 'response.output_text.delta' && parsed.delta) {
                            fullContent += parsed.delta;
                            // Send content update
                            onStream({
                                type: 'chat',
                                content: fullContent,
                                messageId,
                                thinkingContent: fullReasoning || undefined,
                                isThinkingComplete: false,
                                isStreaming: true,
                                isStreamEnd: false
                            });
                        }
                        // Handle completion
                        else if (parsed.type === 'response.completed') {
                            // Send final streaming response
                            onStream({
                                type: 'chat',
                                content: fullContent,
                                messageId,
                                thinkingContent: fullReasoning || undefined,
                                isThinkingComplete: !!fullReasoning,
                                isStreaming: true,
                                isStreamEnd: true
                            });
                            // BUGFIX: the old `break` only left the inner for-loop;
                            // flag completion so the outer read loop stops too.
                            completed = true;
                            break;
                        }
                    }
                }
            }
            finally {
                reader.releaseLock();
            }
            // Create final response
            const chatResponse = {
                type: 'chat',
                content: fullContent,
                messageId,
                thinkingContent: fullReasoning || undefined,
                isThinkingComplete: !!fullReasoning,
                isStreaming: false,
                isStreamEnd: true
            };
            // Add messages to memory with sliding window logic
            if (this.config.maintainHistory) {
                await this.addToMemory(message, fullContent, fullReasoning);
            }
            return chatResponse;
        }
        catch (error) {
            if (error instanceof AIConfigurationError ||
                error instanceof AINetworkError ||
                error instanceof AIRateLimitError) {
                throw error;
            }
            console.error('OpenAI streaming adapter error:', error);
            throw new AINetworkError(`Unexpected streaming error: ${error instanceof Error ? error.message : 'Unknown error'}`, this.getName());
        }
    }
    /** @returns {object[]} The in-memory message window (live reference). */
    getHistory() {
        return this.messages;
    }
    /** Clear all memory (messages, summary, user profile) and persist the reset. */
    clearHistory() {
        this.messages = [];
        this.summary = '';
        this.userProfile = {};
        this.saveToStorage();
    }
    /** @param {string} prompt - New base system prompt. */
    setSystemPrompt(prompt) {
        this.config.systemPrompt = prompt;
    }
    /** @returns {string} The configured base system prompt. */
    getSystemPrompt() {
        return this.config.systemPrompt;
    }
    /**
     * Update configuration
     * @param {object} newConfig - Partial config; merged over current values.
     */
    updateConfig(newConfig) {
        this.config = { ...this.config, ...newConfig };
    }
    /**
     * Get current configuration (without API key for security)
     * @returns {object} Config with `apiKey` stripped.
     */
    getConfig() {
        // eslint-disable-next-line @typescript-eslint/no-unused-vars
        const { apiKey, ...configWithoutKey } = this.config;
        return configWithoutKey;
    }
    /**
     * Add message to memory with sliding window + AI summary logic
     * @param {string} userMessage - The user's turn.
     * @param {string} aiResponse - The assistant's reply.
     * @param {string} [reasoning] - Optional reasoning summary to attach.
     */
    async addToMemory(userMessage, aiResponse, reasoning) {
        // Add messages
        this.messages.push({
            id: this.generateId(),
            type: 'chat',
            content: `User: ${userMessage}`,
            timestamp: new Date()
        });
        this.messages.push({
            id: this.generateId(),
            type: 'chat',
            content: `${aiResponse}`,
            timestamp: new Date(),
            ...(reasoning && {
                thinkingContent: reasoning,
                isThinkingComplete: true
            })
        });
        // Extract user name if available
        this.extractUserInfo(userMessage);
        // Check if we need to summarize (at maxMessages - 1)
        if (this.messages.length >= this.maxMessages - 1) {
            await this.createSummaryAndTruncate();
        }
        this.saveToStorage();
    }
    /**
     * Create AI summary and truncate old messages. Failures are logged and
     * swallowed deliberately — memory keeps growing until the next attempt.
     */
    async createSummaryAndTruncate() {
        try {
            // Build conversation text
            const conversationText = this.messages.map((msg) => msg.content).join('\n');
            // Create summary prompt
            let summaryPrompt = 'Summarize this conversation preserving key details like names, preferences, established facts, and context:\n\n';
            if (this.summary) {
                summaryPrompt += `Previous summary: ${this.summary}\n\nRecent conversation:\n`;
            }
            summaryPrompt += conversationText + '\n\nConcise summary:';
            // Generate summary using AI
            const newSummary = await this.generateSummaryWithAI(summaryPrompt);
            if (newSummary) {
                this.summary = newSummary;
                // Keep only last 2 messages
                this.messages = this.messages.slice(-2);
            }
        }
        catch (error) {
            console.warn('Failed to create summary:', error);
        }
    }
    /**
     * Generate summary using OpenAI
     * @param {string} prompt - Fully-built summarization prompt.
     * @returns {Promise<string|null>} The summary text, or null on any failure.
     */
    async generateSummaryWithAI(prompt) {
        try {
            const response = await fetch(`${this.config.baseUrl}/responses`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.config.apiKey}`
                },
                body: JSON.stringify({
                    model: this.config.model,
                    input: [
                        {
                            role: 'system',
                            content: 'You are a helpful assistant that creates concise conversation summaries.'
                        },
                        { role: 'user', content: prompt }
                    ],
                    // NOTE(review): some reasoning models reject `temperature`;
                    // confirm for the configured model.
                    temperature: 0.3,
                    // BUGFIX: the Responses API expects `max_output_tokens`
                    // (`max_tokens` is rejected as unsupported, so every summary
                    // request was silently failing with a 400).
                    max_output_tokens: 200
                })
            });
            if (response.ok) {
                const data = await response.json();
                // Extract content from the new response format
                for (const outputItem of data.output || []) {
                    if (outputItem.type === 'message' && outputItem.content) {
                        const content = outputItem.content
                            .filter((c) => c.type === 'output_text')
                            .map((c) => c.text)
                            .join('');
                        return content.trim() || null;
                    }
                }
                return null;
            }
        }
        catch (error) {
            console.error('AI summary generation failed:', error);
        }
        return null;
    }
    /**
     * Build memory context from summary + recent messages
     * @returns {string} Trimmed context block (may be empty).
     */
    buildMemoryContext() {
        let context = '';
        // Add user info
        if (this.userProfile.name) {
            context += `User: ${this.userProfile.name}\n`;
        }
        // Add summary
        if (this.summary) {
            context += `\nPrevious conversation summary:\n${this.summary}\n`;
        }
        // Add recent messages
        if (this.messages.length > 0) {
            context += '\nRecent conversation:\n';
            this.messages.forEach((msg) => {
                context += `${msg.content}\n`;
            });
        }
        return context.trim();
    }
    /**
     * Extract user information
     * @param {string} message - User message scanned for a self-introduction.
     */
    extractUserInfo(message) {
        const nameMatch = message.match(/(?:my name is|i'm|i am|call me)\s+([a-zA-Z0-9_]+)/i);
        if (nameMatch && nameMatch[1]) {
            this.userProfile.name = nameMatch[1];
        }
    }
    /**
     * Save to localStorage. The key embeds only the API key's last 8 chars,
     * so the full key is never written to storage. No-op outside the browser.
     */
    saveToStorage() {
        if (typeof localStorage === 'undefined')
            return;
        try {
            const data = {
                messages: this.messages,
                summary: this.summary,
                userProfile: this.userProfile
            };
            localStorage.setItem(`openai-memory-${this.config.apiKey.slice(-8)}`, JSON.stringify(data));
        }
        catch (error) {
            console.warn('Failed to save memory:', error);
        }
    }
    /**
     * Load from localStorage. No-op outside the browser; corrupt data is
     * logged and ignored so a bad cache never blocks construction.
     */
    loadFromStorage() {
        if (typeof localStorage === 'undefined')
            return;
        try {
            const stored = localStorage.getItem(`openai-memory-${this.config.apiKey.slice(-8)}`);
            if (stored) {
                const data = JSON.parse(stored);
                this.messages = (data.messages || []).map((msg) => ({
                    ...msg,
                    timestamp: new Date(msg.timestamp),
                    // Ensure thinking content is marked as complete when loaded from storage
                    ...(msg.thinkingContent && { isThinkingComplete: true })
                }));
                this.summary = data.summary || '';
                this.userProfile = data.userProfile || {};
            }
        }
        catch (error) {
            console.warn('Failed to load memory:', error);
        }
    }
    /** @returns {string} Locally-unique message id (`msg_<epoch>_<random>`). */
    generateId() {
        // `substr` is deprecated; `slice` is the supported equivalent.
        return `msg_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
    }
    /**
     * Build enhanced system prompt with memory context
     * @param {string} memoryContext - Output of buildMemoryContext().
     * @param {string} userMessage - Used to pick conditional rule sections.
     * @returns {string} Final system prompt sent to the API.
     */
    buildEnhancedSystemPrompt(memoryContext, userMessage) {
        let enhancedPrompt = this.buildConditionalSystemPrompt(userMessage);
        if (memoryContext.trim()) {
            enhancedPrompt += '\n\n--- CONVERSATION CONTEXT ---\n' + memoryContext;
            enhancedPrompt +=
                '\n\nIMPORTANT: Use the context above to maintain consistency. If you have an established identity, age, or preferences, continue using them. Reference previous conversations when relevant.';
        }
        return enhancedPrompt;
    }
    /**
     * Get memory statistics
     * @returns {{ totalMessages: number, hasSummary: boolean }}
     */
    getMemoryStats() {
        return {
            totalMessages: this.messages.length,
            hasSummary: this.summary.length > 0
        };
    }
    /**
     * Get the context that will be sent to AI
     * @returns {string} Same value buildMemoryContext() produces.
     */
    getContextForAI() {
        return this.buildMemoryContext();
    }
}