@simpleapps-com/augur-api

TypeScript client library for Augur microservices API endpoints
import { OllamaGenerateRequestParamsSchema, OllamaGenerateResponseSchema, } from '../schemas/ollamaGenerate';
/**
 * Creates the ollama.generate resource methods
 * OpenAPI Path: /ollama/generate → ollama.generate.*
 * @description Ollama local AI model generation functionality
 */
export function createOllamaGenerateResource(executeRequest) {
    return {
        /**
         * Generate content using local Ollama AI models
         *
         * @fullPath api.gregorovich.ollama.generate.create
         * @service gregorovich
         * @domain ai-generation
         * @dataMethod ollamaData.generate.create
         * @discoverable true
         * @searchTerms ["ollama", "generate", "ai", "local", "llm", "model", "text", "completion", "generation"]
         * @relatedEndpoints ["api.gregorovich.chatGpt.ask.get", "api.p21Pim.ai.suggestions.get", "api.agrSite.ai.transcripts.create", "api.gregorovich.documents.list"]
         * @commonPatterns ["Generate AI content", "Local AI processing", "Text completion", "AI writing assistant", "Custom model inference", "Private AI generation"]
         * @workflow ["content-generation", "ai-assistance", "text-processing", "creative-writing", "code-generation", "document-analysis"]
         * @prerequisites ["Public bearer token", "x-site-id header", "Model name", "Generation prompt", "Ollama service availability"]
         * @nextSteps ["Process generated content", "Refine generation parameters", "Chain multiple generations", "Store generation results"]
         * @businessRules ["Requires model and prompt parameters", "Supports local model execution", "Configurable generation parameters", "Token and context management", "Performance monitoring"]
         * @functionalArea "ai-and-automation"
         * @crossSite "Multi-site AI generation support"
         * @caching "Cache responses for 30 minutes for identical prompts and parameters"
         * @performance "Local processing, faster than external APIs but depends on model size and complexity"
         *
         * @param params Ollama generation parameters including model, prompt, and generation configuration
         * @returns Promise<OllamaGenerateResponse> Complete AI generation result with content, metadata, and performance metrics
         *
         * @example
         * ```typescript
         * // Simple text generation
         * const generation = await client.ollama.generate.create({
         *   model: 'llama2',
         *   prompt: 'Write a short story about a robot learning to paint'
         * });
         * console.log(generation.data.response); // Generated story
         * console.log(generation.data.total_duration); // Generation time
         *
         * // Get just the generated content
         * const generatedCode = await client.ollamaData.generate.create({
         *   model: 'codellama',
         *   prompt: 'Create a Python function to calculate fibonacci numbers'
         * });
         * console.log(generatedCode.response); // Direct access to generated code
         *
         * // Advanced generation with parameters
         * const technicalDoc = await client.ollamaData.generate.create({
         *   model: 'mistral',
         *   prompt: 'Explain machine learning algorithms for beginners',
         *   temperature: 0.3,
         *   max_tokens: 1500,
         *   system: 'You are a patient teacher who explains complex topics simply',
         *   format: 'json',
         *   top_p: 0.9,
         *   seed: 42
         * });
         *
         * // Creative writing with higher randomness
         * const creativeContent = await client.ollamaData.generate.create({
         *   model: 'neural-chat',
         *   prompt: 'Write a product description for a smart home device',
         *   temperature: 0.8,
         *   top_k: 40,
         *   repeat_penalty: 1.1,
         *   stop: ['\n\n', 'END'],
         *   sessionId: 'creative-session-1'
         * });
         *
         * // Conversation with context
         * const conversational = await client.ollamaData.generate.create({
         *   model: 'llama2-chat',
         *   prompt: 'What are the benefits of renewable energy?',
         *   context: previousContext, // Array of context tokens
         *   conversationId: 'energy-discussion',
         *   num_predict: 200
         * });
         * ```
         */
        create: async (params) => {
            return executeRequest({
                method: 'POST',
                path: '/ollama/generate',
                paramsSchema: OllamaGenerateRequestParamsSchema,
                responseSchema: OllamaGenerateResponseSchema,
            }, params);
        },
    };
}
/**
 * Creates the ollamaData.generate resource methods (data-only versions)
 */
export function createOllamaGenerateDataResource(ollamaGenerate) {
    return {
        /**
         * Generate content with Ollama and return generation result data only
         * @param params Ollama generation parameters
         * @returns Promise<OllamaGenerateResult> Generated content with metadata and performance metrics
         */
        create: async (params) => {
            const response = await ollamaGenerate.create(params);
            return response.data;
        },
    };
}
//# sourceMappingURL=ollama-generate.js.map
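
For context, a minimal sketch of how these two factories might be wired together. Only the two factory imports come from this module; the `executeRequest` implementation, base URL, header values, and Zod-style `.parse()` schema shape below are assumptions for illustration, inferred from the JSDoc `@prerequisites` (public bearer token, `x-site-id` header) and the `paramsSchema`/`responseSchema` config passed by `create`.

```typescript
// Hypothetical wiring sketch -- executeRequest, the base URL, and the header
// values are assumptions, not part of this module.
import {
  createOllamaGenerateResource,
  createOllamaGenerateDataResource,
} from './ollama-generate';

interface RequestConfig {
  method: string;
  path: string;
  paramsSchema: { parse: (value: unknown) => unknown };  // assumed Zod-style
  responseSchema: { parse: (value: unknown) => any };
}

// Validates params, POSTs them to the service, and validates the envelope.
async function executeRequest(config: RequestConfig, params: unknown) {
  const body = config.paramsSchema.parse(params);
  const res = await fetch(`https://augur.example.com${config.path}`, { // hypothetical base URL
    method: config.method,
    headers: {
      'Content-Type': 'application/json',
      Authorization: 'Bearer <public-token>', // per @prerequisites
      'x-site-id': '<site-id>',               // per @prerequisites
    },
    body: JSON.stringify(body),
  });
  return config.responseSchema.parse(await res.json());
}

// The data resource wraps the full resource and unwraps the .data envelope,
// mirroring the client.ollama.generate / client.ollamaData.generate split
// shown in the JSDoc examples above.
const ollamaGenerate = createOllamaGenerateResource(executeRequest);
const ollamaGenerateData = createOllamaGenerateDataResource(ollamaGenerate);

const result = await ollamaGenerateData.create({
  model: 'llama2',
  prompt: 'Summarize the benefits of local LLM inference.',
});
console.log(result.response);
```

Injecting `executeRequest` this way keeps transport, auth, and validation concerns out of the resource definitions, so each resource module only declares its path, method, and schemas.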