/**
 * Package: @simpleapps-com/augur-api
 * TypeScript client library for Augur microservices API endpoints.
 */
import type { OllamaGenerateRequestParams, OllamaGenerateResult } from '../schemas';
import type { EndpointConfig } from '../../../core/base-client';
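/**
 * A request executor pre-bound to a configured client instance. Each resource
 * method delegates to it with the endpoint config, request params, and any
 * path parameters, and resolves with the typed response.
 */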
type BoundExecuteRequest = <TParams, TResponse>(
    config: EndpointConfig<TParams, TResponse>,
    params?: TParams,
    pathParams?: Record<string, string>
) => Promise<TResponse>;
/**
* Creates the ollama.generate resource methods
* OpenAPI Path: /ollama/generate → ollama.generate.*
* @description Ollama local AI model generation functionality
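*
* @example
* ```typescript
* // Illustrative wiring sketch only: `baseClient` and its bound
* // `executeRequest` are assumptions standing in for the library's
* // core client, not part of this file's exports.
* const ollamaGenerate = createOllamaGenerateResource(
*   baseClient.executeRequest.bind(baseClient)
* );
* const result = await ollamaGenerate.create({ model: 'llama2', prompt: 'Hello' });
* ```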
*/
export declare function createOllamaGenerateResource(executeRequest: BoundExecuteRequest): {
/**
* Generate content using local Ollama AI models
*
* @fullPath api.gregorovich.ollama.generate.create
* @service gregorovich
* @domain ai-generation
* @dataMethod ollamaData.generate.create
* @discoverable true
* @searchTerms ["ollama", "generate", "ai", "local", "llm", "model", "text", "completion", "generation"]
* @relatedEndpoints ["api.gregorovich.chatGpt.ask.get", "api.p21Pim.ai.suggestions.get", "api.agrSite.ai.transcripts.create", "api.gregorovich.documents.list"]
* @commonPatterns ["Generate AI content", "Local AI processing", "Text completion", "AI writing assistant", "Custom model inference", "Private AI generation"]
* @workflow ["content-generation", "ai-assistance", "text-processing", "creative-writing", "code-generation", "document-analysis"]
* @prerequisites ["Public bearer token", "x-site-id header", "Model name", "Generation prompt", "Ollama service availability"]
* @nextSteps ["Process generated content", "Refine generation parameters", "Chain multiple generations", "Store generation results"]
* @businessRules ["Requires model and prompt parameters", "Supports local model execution", "Configurable generation parameters", "Token and context management", "Performance monitoring"]
* @functionalArea "ai-and-automation"
* @crossSite "Multi-site AI generation support"
* @caching "Cache responses for 30 minutes for identical prompts and parameters"
* @performance "Local processing; often faster than external APIs, though latency depends on model size and prompt complexity"
*
* @param params Ollama generation parameters including model, prompt, and generation configuration
* @returns Promise resolving to the full response envelope: the generation result under `data`, plus `status`, `message`, and count metadata
*
* @example
* ```typescript
* // Simple text generation
* const generation = await client.ollama.generate.create({
* model: 'llama2',
* prompt: 'Write a short story about a robot learning to paint'
* });
* console.log(generation.data.response); // Generated story
* console.log(generation.data.total_duration); // Generation time
*
* // Get just the generated content
* const code = await client.ollamaData.generate.create({
* model: 'codellama',
* prompt: 'Create a Python function to calculate fibonacci numbers'
* });
* console.log(code.response); // Direct access to the generated code
*
* // Advanced generation with parameters
* const technicalDoc = await client.ollamaData.generate.create({
* model: 'mistral',
* prompt: 'Explain machine learning algorithms for beginners',
* temperature: 0.3,
* num_predict: 1500,
* system: 'You are a patient teacher who explains complex topics simply',
* format: 'json',
* top_p: 0.9,
* seed: 42
* });
*
* // Creative writing with higher randomness
* const creativeContent = await client.ollamaData.generate.create({
* model: 'neural-chat',
* prompt: 'Write a product description for a smart home device',
* temperature: 0.8,
* top_k: 40,
* repeat_penalty: 1.1,
* stop: ['\n\n', 'END'],
* sessionId: 'creative-session-1'
* });
*
* // Conversation with context
* const conversational = await client.ollamaData.generate.create({
* model: 'llama2-chat',
* prompt: 'What are the benefits of renewable energy?',
* context: previousContext, // Array of context tokens
* conversationId: 'energy-discussion',
* num_predict: 200
* });
* ```
*/
create: (params: OllamaGenerateRequestParams) => Promise<{
params: Record<string, unknown> | unknown[];
data: Record<string, unknown>;
options: Record<string, unknown> | unknown[];
status: number;
message: string;
count: number;
total: number;
totalResults: number;
}>;
};
/**
* Creates the ollamaData.generate resource methods (data-only versions)
*/
export declare function createOllamaGenerateDataResource(ollamaGenerate: ReturnType<typeof createOllamaGenerateResource>): {
/**
* Generate content with Ollama and return generation result data only
* @param params Ollama generation parameters
* @returns Promise<OllamaGenerateResult> Generated content with metadata and performance metrics
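* @example
* ```typescript
* // Illustrative sketch only: `ollamaGenerate` is assumed to be the
* // resource returned by createOllamaGenerateResource above; this
* // wrapper resolves with the unwrapped `data` payload.
* const ollamaData = createOllamaGenerateDataResource(ollamaGenerate);
* const summary = await ollamaData.create({
* model: 'llama2',
* prompt: 'Summarize the benefits of running models locally'
* });
* console.log(summary.response); // Generated text, no envelope
* ```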
*/
create: (params: OllamaGenerateRequestParams) => Promise<OllamaGenerateResult>;
};
export type OllamaGenerateResource = ReturnType<typeof createOllamaGenerateResource>;
export type OllamaGenerateDataResource = ReturnType<typeof createOllamaGenerateDataResource>;
export {};
//# sourceMappingURL=ollama-generate.d.ts.map