@mvkproject/nexus
Version:
Free AI SDK with API key (500 free daily requests). Access 25+ LLM models (GPT-4, Gemini, Llama, DeepSeek), generate images with 14+ models (Flux, Stable Diffusion), and integrate Akinator game - all completely free.
228 lines (189 loc) • 8.88 kB
JavaScript
import NexusClient from '../src/index.js';
// --- API key setup -------------------------------------------------------
// The key must be supplied via the environment; fail fast with a clear
// message so the suite never runs against an unconfigured client.
const NEXUS_API_KEY = process.env.NEXUS_API_KEY;
if (!NEXUS_API_KEY) {
  console.error('❌ Error: NEXUS_API_KEY is not configured');
  console.log('Please configure your API key as an environment variable');
  process.exit(1);
}

// Single shared client reused by every test below.
const client = new NexusClient({ apiKey: NEXUS_API_KEY });

console.log('🚀 Starting Nexus SDK tests...\n');
/**
 * TEST 1: basic text generation from a single prompt.
 * Exercises `client.text.generate` with a fixed model and prompt.
 * @returns {Promise<boolean>} true when the request succeeds, false on error.
 */
async function testTextGeneration() {
  console.log('━'.repeat(40));
  console.log('📝 TEST 1: Text Generation');
  console.log('━'.repeat(40));

  try {
    const response = await client.text.generate({
      model: 'gemini-2.5-flash',
      prompt: 'Explain in one sentence what artificial intelligence is.',
      temperature: 0.7,
      maxOutputTokens: 100
    });

    console.log('✅ Text generation successful');
    console.log('📝 Response:', response.completion);
    console.log('🧠 Model used:', response.model);
    // `??` instead of `||` so a legitimate count of 0 tokens is still shown.
    console.log('📊 Tokens:', response.usage?.totalTokens ?? 'N/A');
    return true;
  } catch (error) {
    console.error('❌ Error in text generation:', error.message);
    return false;
  }
}
/**
 * TEST 2: streaming text generation.
 * Exercises `client.text.generateStream`, printing each chunk as it arrives
 * and accumulating the full text to report its length.
 * @returns {Promise<boolean>} true when the stream completes, false on error.
 */
async function testTextStreaming() {
  console.log('\n' + '━'.repeat(40));
  console.log('🌊 TEST 2: Text Streaming');
  console.log('━'.repeat(40));

  try {
    let fullText = '';
    process.stdout.write('📝 Real-time response: ');

    await client.text.generateStream({
      model: 'gemini-2.5-flash',
      prompt: 'Count to 5 in English.',
      temperature: 0.7
    }, (chunk) => {
      // Echo chunks live, then keep them for the final length report.
      process.stdout.write(chunk);
      fullText += chunk;
    });

    console.log('\n✅ Text streaming successful');
    console.log('📊 Total length:', fullText.length, 'characters');
    return true;
  } catch (error) {
    console.error('\n❌ Error in text streaming:', error.message);
    return false;
  }
}
/**
 * TEST 3: image generation.
 * Exercises `client.image.generate` with the 'flux' model at 512x512.
 * @returns {Promise<boolean>} true when the request succeeds, false on error.
 */
async function testImageGeneration() {
  console.log('\n' + '━'.repeat(40));
  console.log('🎨 TEST 3: Image Generation');
  console.log('━'.repeat(40));

  try {
    const response = await client.image.generate({
      prompt: 'An adorable kitten playing with a ball',
      model: 'flux',
      width: 512,
      height: 512
    });

    console.log('✅ Image generation successful');
    console.log('🖼️ Image URL:', response.imageUrl);
    console.log('📏 Size:', response.size);
    console.log('🧠 Model used:', response.model);
    console.log('⏰ Expires in:', response.expiresIn);
    return true;
  } catch (error) {
    console.error('❌ Error in image generation:', error.message);
    return false;
  }
}
/**
 * TEST 4: OpenAI-compatible chat format.
 * Exercises `client.text.generate` with a `messages` array
 * (system + user roles) instead of a plain `prompt`.
 * @returns {Promise<boolean>} true when the request succeeds, false on error.
 */
async function testOpenAIFormat() {
  console.log('\n' + '━'.repeat(40));
  console.log('🔄 TEST 4: OpenAI Compatible Format');
  console.log('━'.repeat(40));

  try {
    const response = await client.text.generate({
      model: 'llama-3.3-70b-instruct',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' }
      ],
      temperature: 0.7
    });

    console.log('✅ OpenAI format successful');
    console.log('📝 Response:', response.completion);
    console.log('🧠 Model used:', response.model);
    return true;
  } catch (error) {
    console.error('❌ Error in OpenAI format:', error.message);
    return false;
  }
}
/**
 * TEST 5: server-side conversation history via `userid`.
 * Sends a fact in a first request, then asks about it in a second request
 * with the same `userid`; the second answer should recall the fact ("blue").
 * @returns {Promise<boolean>} true when both requests succeed, false on error.
 */
async function testConversationHistory() {
  console.log('\n' + '━'.repeat(40));
  console.log('💬 TEST 5: Conversation History (Prompt)');
  console.log('━'.repeat(40));

  // Fresh per-run id so earlier test runs cannot pollute the history.
  const userId = 'test-user-' + Date.now();

  try {
    console.log('🔹 First question with history...');
    const response1 = await client.text.generate({
      model: 'gemini-2.5-flash',
      prompt: 'My favorite color is blue.',
      userid: userId,
      temperature: 0.7
    });
    console.log('✅ First response:', response1.completion);

    console.log('\n🔹 Second question using context...');
    const response2 = await client.text.generate({
      model: 'gemini-2.5-flash',
      prompt: 'What is my favorite color?',
      userid: userId,
      temperature: 0.7
    });
    console.log('✅ Second response (should mention blue):', response2.completion);
    return true;
  } catch (error) {
    console.error('❌ Error in conversation history:', error.message);
    return false;
  }
}
/**
 * TEST 6: client-managed conversation history in OpenAI format.
 * The first assistant reply is manually appended to the `messages` array of
 * the second request; the second answer should recall "Carlos"/"JavaScript".
 * @returns {Promise<boolean>} true when both requests succeed, false on error.
 */
async function testConversationHistoryOpenAI() {
  console.log('\n' + '━'.repeat(40));
  console.log('💬 TEST 6: Conversation History (OpenAI Format - Manual)');
  console.log('━'.repeat(40));

  try {
    console.log('🔹 First conversation with messages...');
    const response1 = await client.text.generate({
      model: 'llama-3.3-70b-instruct',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello, my name is Carlos and I am a JavaScript developer.' }
      ],
      temperature: 0.7
    });
    console.log('✅ First response:', response1.completion);

    console.log('\n🔹 Second conversation - manually including history...');
    const response2 = await client.text.generate({
      model: 'llama-3.3-70b-instruct',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello, my name is Carlos and I am a JavaScript developer.' },
        { role: 'assistant', content: response1.completion }, // Manual history
        { role: 'user', content: 'What is my name and what do I do?' }
      ],
      temperature: 0.7
    });
    console.log('✅ Second response (should mention Carlos and JavaScript):', response2.completion);
    return true;
  } catch (error) {
    console.error('❌ Error in OpenAI history:', error.message);
    return false;
  }
}
/**
 * Runs every test sequentially (intentionally serial: the free tier has a
 * daily request quota and some tests depend on server-side state), prints a
 * pass/fail summary, and exits with code 0 only when all tests passed.
 * @returns {Promise<never>} never resolves normally — always calls process.exit.
 */
async function runAllTests() {
  const results = [];

  results.push({ name: 'Text Generation', success: await testTextGeneration() });
  results.push({ name: 'Text Streaming', success: await testTextStreaming() });
  results.push({ name: 'Image Generation', success: await testImageGeneration() });
  results.push({ name: 'OpenAI Format', success: await testOpenAIFormat() });
  results.push({ name: 'Conversation History (Prompt + userid)', success: await testConversationHistory() });
  results.push({ name: 'Conversation History (OpenAI Manual)', success: await testConversationHistoryOpenAI() });

  console.log('\n' + '━'.repeat(40));
  console.log('📊 TEST SUMMARY');
  console.log('━'.repeat(40));

  const passed = results.filter(r => r.success).length;
  const total = results.length;

  results.forEach(result => {
    const icon = result.success ? '✅' : '❌';
    console.log(`${icon} ${result.name}`);
  });

  console.log('\n' + '━'.repeat(40));
  console.log(`📈 Result: ${passed}/${total} tests passed`);
  if (passed === total) {
    console.log('🎉 All tests passed! The SDK is ready to publish.');
  } else {
    console.log('⚠️ Some tests failed. Review the errors before publishing.');
  }
  console.log('━'.repeat(40) + '\n');

  // Non-zero exit lets CI treat any failure as a failed build.
  process.exit(passed === total ? 0 : 1);
}
// Entry point: run the suite; any error escaping runAllTests (e.g. a bug in
// the harness itself) is reported and turned into a failing exit code.
runAllTests().catch(error => {
  console.error('\n💥 Fatal error:', error);
  process.exit(1);
});