@jackhua/mini-langchain
Version:
A lightweight TypeScript implementation of LangChain with cost optimization features
57 lines • 2.38 kB
JavaScript
; // NOTE(review): stray empty statement — tsc output normally opens with "use strict"; the directive appears lost in extraction. Confirm against the built file.
Object.defineProperty(exports, "__esModule", { value: true }); // CommonJS interop flag emitted by tsc
const dotenv_1 = require("dotenv");
const gemini_1 = require("./llms/gemini"); // Gemini LLM wrapper
const prompt_1 = require("./prompts/prompt"); // PromptTemplate factory
const llm_1 = require("./chains/llm"); // LLMChain
// Load environment variables
// (the `(0, fn)()` form detaches `this` from the module object — standard tsc output)
(0, dotenv_1.config)();
/**
 * Demonstrates three usage patterns of the mini-langchain Gemini wrapper:
 *   1. a direct `llm.call()` with a plain string prompt,
 *   2. an `LLMChain` driven by a `PromptTemplate`,
 *   3. a multi-turn conversation via `llm.generate()`.
 *
 * All errors are caught and logged to stderr, so the returned promise
 * never rejects.
 *
 * @returns {Promise<void>}
 */
async function main() {
    try {
        console.log('🚀 Starting Gemini Example...\n');
        // Fail fast with an actionable message instead of a cryptic SDK error
        // deep inside the first API call.
        if (!process.env.GEMINI_API_KEY) {
            throw new Error('GEMINI_API_KEY environment variable is not set');
        }
        // Initialize Gemini LLM
        const llm = new gemini_1.Gemini({
            apiKey: process.env.GEMINI_API_KEY,
            model: 'gemini-1.5-flash',
            defaultTemperature: 0.7
        });
        // Example 1: Simple prompt
        console.log('=== Example 1: Simple Question ===');
        const simpleResult = await llm.call('What is the capital of Japan?');
        console.log('Answer:', simpleResult);
        // Example 2: Using prompt template
        console.log('\n=== Example 2: Prompt Template ===');
        const prompt = prompt_1.PromptTemplate.fromTemplate("Generate a creative name for a {type} restaurant that serves {cuisine} food.");
        const chain = new llm_1.LLMChain({ llm, prompt });
        const result = await chain.call({
            type: "family-friendly",
            cuisine: "Italian"
        });
        console.log('Restaurant name:', result.text);
        // Example 3: Multi-turn conversation
        console.log('\n=== Example 3: Multi-turn Conversation ===');
        const messages = [
            { type: 'human', content: 'Hi! Can you help me plan a trip to Tokyo?' },
        ];
        const response1 = await llm.generate(messages);
        console.log('AI:', response1.text);
        // Add AI response to conversation history so the follow-up question
        // is answered with context.
        if (response1.message) {
            messages.push(response1.message);
        }
        messages.push({ type: 'human', content: 'What are the must-see attractions there?' });
        const response2 = await llm.generate(messages);
        console.log('\nHuman: What are the must-see attractions there?');
        console.log('AI:', response2.text);
        // Bug fix: this string literal was broken across a physical newline
        // (unterminated string — a syntax error) by a mangled emoji.
        console.log('\n✅ Gemini example completed successfully!');
    }
    catch (error) {
        console.error('❌ Error:', error.message || error);
        // Surface the HTTP response body when the SDK attaches one (axios-style).
        if (error.response?.data) {
            console.error('Response data:', error.response.data);
        }
    }
}
// Run the example. main() catches all of its own errors, so the returned
// promise never rejects and no trailing .catch() is required here.
main();
//# sourceMappingURL=example-gemini.js.map