UNPKG

@mvkproject/nexus

Version:

Free AI SDK with API key (500 free daily requests). Access 25+ LLM models (GPT-4, Gemini, Llama, DeepSeek), generate images with 14+ models (Flux, Stable Diffusion), and integrate Akinator game - all completely free.

228 lines (189 loc) • 8.88 kB
import NexusClient from '../src/index.js';

// Fail fast when the API key is missing — every test below needs it.
const NEXUS_API_KEY = process.env.NEXUS_API_KEY;
if (!NEXUS_API_KEY) {
  console.error('āŒ Error: NEXUS_API_KEY is not configured');
  console.log('Please configure your API key as an environment variable');
  process.exit(1);
}

const client = new NexusClient({ apiKey: NEXUS_API_KEY });

console.log('šŸš€ Starting Nexus SDK tests...\n');

// Section divider — previously duplicated inline in every banner.
const DIVIDER = '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━';

/**
 * Print a three-line section banner (divider / title / divider).
 * Matches the original output exactly: every section after the first
 * printed a leading '\n' fused into its first divider line.
 *
 * @param {string} title - Section title line.
 * @param {{ leadingNewline?: boolean }} [opts] - Set false for the very first banner.
 */
function logHeader(title, { leadingNewline = true } = {}) {
  console.log(`${leadingNewline ? '\n' : ''}${DIVIDER}`);
  console.log(title);
  console.log(DIVIDER);
}

/**
 * TEST 1: basic prompt-based text generation.
 * @returns {Promise<boolean>} true on success, false on any API error.
 */
async function testTextGeneration() {
  logHeader('šŸ“ TEST 1: Text Generation', { leadingNewline: false });
  try {
    const response = await client.text.generate({
      model: 'gemini-2.5-flash',
      prompt: 'Explain in one sentence what artificial intelligence is.',
      temperature: 0.7,
      maxOutputTokens: 100
    });
    console.log('āœ… Text generation successful');
    console.log('šŸ“„ Response:', response.completion);
    console.log('šŸ”§ Model used:', response.model);
    console.log('šŸ“Š Tokens:', response.usage?.totalTokens || 'N/A');
    return true;
  } catch (error) {
    console.error('āŒ Error in text generation:', error.message);
    return false;
  }
}

/**
 * TEST 2: streaming text generation — chunks are echoed as they arrive
 * and accumulated so the total length can be reported.
 * @returns {Promise<boolean>} true on success, false on any API error.
 */
async function testTextStreaming() {
  logHeader('🌊 TEST 2: Text Streaming');
  try {
    let fullText = '';
    process.stdout.write('šŸ“„ Real-time response: ');
    await client.text.generateStream({
      model: 'gemini-2.5-flash',
      prompt: 'Count to 5 in English.',
      temperature: 0.7
    }, (chunk) => {
      process.stdout.write(chunk);
      fullText += chunk;
    });
    console.log('\nāœ… Text streaming successful');
    console.log('šŸ“ Total length:', fullText.length, 'characters');
    return true;
  } catch (error) {
    console.error('\nāŒ Error in text streaming:', error.message);
    return false;
  }
}

/**
 * TEST 3: image generation via the flux model.
 * @returns {Promise<boolean>} true on success, false on any API error.
 */
async function testImageGeneration() {
  logHeader('šŸŽØ TEST 3: Image Generation');
  try {
    const response = await client.image.generate({
      prompt: 'An adorable kitten playing with a ball',
      model: 'flux',
      width: 512,
      height: 512
    });
    console.log('āœ… Image generation successful');
    console.log('šŸ–¼ļø Image URL:', response.imageUrl);
    console.log('šŸ“ Size:', response.size);
    console.log('šŸ”§ Model used:', response.model);
    console.log('ā° Expires in:', response.expiresIn);
    return true;
  } catch (error) {
    console.error('āŒ Error in image generation:', error.message);
    return false;
  }
}

/**
 * TEST 4: OpenAI-style `messages` array instead of a bare prompt.
 * @returns {Promise<boolean>} true on success, false on any API error.
 */
async function testOpenAIFormat() {
  logHeader('šŸ”„ TEST 4: OpenAI Compatible Format');
  try {
    const response = await client.text.generate({
      model: 'llama-3.3-70b-instruct',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' }
      ],
      temperature: 0.7
    });
    console.log('āœ… OpenAI format successful');
    console.log('šŸ“„ Response:', response.completion);
    console.log('šŸ”§ Model used:', response.model);
    return true;
  } catch (error) {
    console.error('āŒ Error in OpenAI format:', error.message);
    return false;
  }
}

/**
 * TEST 5: server-side conversation memory keyed by `userid`.
 * The second request should recall the color stated in the first.
 * @returns {Promise<boolean>} true on success, false on any API error.
 */
async function testConversationHistory() {
  logHeader('šŸ’¬ TEST 5: Conversation History (Prompt)');
  // Unique per run so prior test runs cannot pollute the history.
  const userId = 'test-user-' + Date.now();
  try {
    console.log('šŸ“ First question with history...');
    const response1 = await client.text.generate({
      model: 'gemini-2.5-flash',
      prompt: 'My favorite color is blue.',
      userid: userId,
      temperature: 0.7
    });
    console.log('āœ… First response:', response1.completion);
    console.log('\nšŸ“ Second question using context...');
    const response2 = await client.text.generate({
      model: 'gemini-2.5-flash',
      prompt: 'What is my favorite color?',
      userid: userId,
      temperature: 0.7
    });
    console.log('āœ… Second response (should mention blue):', response2.completion);
    return true;
  } catch (error) {
    console.error('āŒ Error in conversation history:', error.message);
    return false;
  }
}

/**
 * TEST 6: client-managed history in OpenAI format — the first reply is
 * manually appended as an `assistant` message in the second request.
 * @returns {Promise<boolean>} true on success, false on any API error.
 */
async function testConversationHistoryOpenAI() {
  logHeader('šŸ’¬ TEST 6: Conversation History (OpenAI Format - Manual)');
  try {
    console.log('šŸ“ First conversation with messages...');
    const response1 = await client.text.generate({
      model: 'llama-3.3-70b-instruct',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello, my name is Carlos and I am a JavaScript developer.' }
      ],
      temperature: 0.7
    });
    console.log('āœ… First response:', response1.completion);
    console.log('\nšŸ“ Second conversation - manually including history...');
    const response2 = await client.text.generate({
      model: 'llama-3.3-70b-instruct',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello, my name is Carlos and I am a JavaScript developer.' },
        { role: 'assistant', content: response1.completion }, // Manual history
        { role: 'user', content: 'What is my name and what do I do?' }
      ],
      temperature: 0.7
    });
    console.log('āœ… Second response (should mention Carlos and JavaScript):', response2.completion);
    return true;
  } catch (error) {
    console.error('āŒ Error in OpenAI history:', error.message);
    return false;
  }
}

/**
 * Run every test strictly in sequence, print a pass/fail summary, and
 * exit 0 only when all tests passed (non-zero otherwise, for CI).
 */
async function runAllTests() {
  // Table-driven registry; order matters (TEST 1..6 banners).
  const suite = [
    ['Text Generation', testTextGeneration],
    ['Text Streaming', testTextStreaming],
    ['Image Generation', testImageGeneration],
    ['OpenAI Format', testOpenAIFormat],
    ['Conversation History (Prompt + userid)', testConversationHistory],
    ['Conversation History (OpenAI Manual)', testConversationHistoryOpenAI]
  ];

  const results = [];
  for (const [name, run] of suite) {
    // Sequential on purpose: banners and streamed output must not interleave.
    results.push({ name, success: await run() });
  }

  logHeader('šŸ“Š TEST SUMMARY');
  const passed = results.filter((r) => r.success).length;
  const total = results.length;
  for (const { name, success } of results) {
    console.log(`${success ? 'āœ…' : 'āŒ'} ${name}`);
  }
  console.log(`\n${DIVIDER}`);
  console.log(`šŸ“ˆ Result: ${passed}/${total} tests passed`);
  if (passed === total) {
    console.log('šŸŽ‰ All tests passed! The SDK is ready to publish.');
  } else {
    console.log('āš ļø Some tests failed. Review the errors before publishing.');
  }
  console.log(`${DIVIDER}\n`);
  process.exit(passed === total ? 0 : 1);
}

runAllTests().catch((error) => {
  console.error('\nšŸ’„ Fatal error:', error);
  process.exit(1);
});