git-aiflow (openai-service.d.ts)
🚀 An AI-powered workflow automation tool for effortless Git-based development, combining smart GitLab/GitHub merge & pull request creation with Conan package management.
/**
 * Reasoning configuration options
 */
export interface ReasoningConfig {
    /** Enable reasoning with default parameters */
    enabled?: boolean;
    /** Reasoning effort level (OpenAI-style) */
    effort?: 'high' | 'medium' | 'low';
    /** Maximum reasoning tokens (Anthropic-style) */
    max_tokens?: number;
    /** Exclude reasoning tokens from response */
    exclude?: boolean;
}
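// A minimal configuration sketch; the values below are illustrative, not package defaults.
// Either style can be supplied, mirroring the optional fields above:
//
//   const openAiStyle: ReasoningConfig = { enabled: true, effort: 'medium' };
//   const anthropicStyle: ReasoningConfig = { max_tokens: 2048, exclude: true };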
/**
 * Result of AI-generated commit information
 */
export interface CommitGenerationResult {
    commit: string;
    branch: string;
    description: string;
    title: string;
}
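// Shape sketch of a generated result (field values are invented for illustration):
//
//   const example: CommitGenerationResult = {
//     commit: 'feat(auth): add token refresh on 401 responses',
//     branch: 'feat/auth-token-refresh',
//     title: 'Add token refresh on 401 responses',
//     description: 'Refreshes the access token once when the API returns 401, then retries.',
//   };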
/**
 * Throughput statistics for API performance analysis
 */
export interface ThroughputStats {
    /** Request start timestamp in milliseconds */
    startTime: number;
    /** Request end timestamp in milliseconds */
    endTime: number;
    /** Total response time in milliseconds */
    responseTimeMs: number;
    /** Total response time in seconds */
    responseTimeSeconds: number;
    /** Number of prompt tokens */
    promptTokens: number;
    /** Number of completion tokens */
    completionTokens: number;
    /** Total number of tokens */
    totalTokens: number;
    /** Overall throughput in tokens per second */
    tokensPerSecond: number;
    /** Input throughput in prompt tokens per second */
    promptTokensPerSecond: number;
    /** Output throughput in completion tokens per second */
    completionTokensPerSecond: number;
    /** Time per token in milliseconds */
    millisecondsPerToken: number;
    /** Performance level assessment */
    performanceLevel: string;
    /** Approximate time to first token in milliseconds */
    ttftMs: number;
    /** Average time between tokens in milliseconds */
    avgTimeBetweenTokensMs: number;
    /** Output-to-input token ratio */
    outputInputRatio: number;
    /** Model name used for the request */
    model: string;
    /** Timestamp when stats were recorded */
    recordedAt: number;
}
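// How the derived fields plausibly relate (inferred from the field docs above;
// the package may compute or round these differently):
//
//   function deriveThroughput(promptTokens: number, completionTokens: number, responseTimeMs: number) {
//     const totalTokens = promptTokens + completionTokens;
//     const responseTimeSeconds = responseTimeMs / 1000;
//     return {
//       totalTokens,
//       responseTimeSeconds,
//       tokensPerSecond: totalTokens / responseTimeSeconds,
//       promptTokensPerSecond: promptTokens / responseTimeSeconds,
//       completionTokensPerSecond: completionTokens / responseTimeSeconds,
//       millisecondsPerToken: responseTimeMs / totalTokens,
//       outputInputRatio: completionTokens / promptTokens,
//     };
//   }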
/**
 * OpenAI API service for generating commit messages and branch names
 */
export declare class OpenAiService {
    private readonly client;
    private readonly model;
    private readonly reasoning;
    /** Cache for storing detected context limits by model name */
    private static readonly contextLimitCache;
    /** Cache for the latest throughput statistics */
    private lastThroughputStats;
    /** History of throughput statistics (limited to the last 10 requests) */
    private throughputHistory;
    constructor(apiKey: string, apiUrl: string, model: string, reasoning?: boolean | ReasoningConfig);
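    // Construction sketch (endpoint and model names are illustrative, not defaults);
    // the last argument accepts either a boolean or a ReasoningConfig, per the signature above:
    //
    //   const svc = new OpenAiService(apiKey, 'https://api.openai.com/v1', 'gpt-4o-mini', true);
    //   const tuned = new OpenAiService(apiKey, 'https://api.openai.com/v1', 'gpt-4o-mini', { effort: 'high' });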
    /**
     * Generate commit message, branch name and MR description.
     * Supports batch processing for large diffs that exceed context limits.
     *
     * @param diff The git diff content to analyze
     * @param language Language code for generated content (default: 'en')
     * @returns Promise resolving to commit generation result
     */
    generateCommitAndBranch(diff: string, language?: string): Promise<CommitGenerationResult>;
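    // Usage sketch (hypothetical; `diff` is raw `git diff` output, e.g. from child_process.execSync):
    //
    //   const diff = execSync('git diff --cached', { encoding: 'utf8' });
    //   const { commit, branch, title, description } = await svc.generateCommitAndBranch(diff, 'en');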
    /**
     * Process the diff directly in a single request; used for small diffs that fit the context limit.
     *
     * @param diff The git diff content to analyze
     * @param language Language code for generated content
     * @returns Promise resolving to commit generation result
     */
    private generateDirectCommitAndBranch;
    /**
     * Detect the context length limit for the current model.
     * Supports various AI models including OpenAI, DeepSeek, Qwen, Kimi, Ollama, etc.
     * Also handles model variants with similar context limits.
     *
     * @returns Promise resolving to the token limit for the model
     */
    private detectContextLimit;
    /**
     * Test whether a model can handle a specific context limit by making a small API call.
     *
     * @param contextLimit The context limit to test
     * @returns Promise resolving to true if the limit is supported
     */
    private testContextLimit;
    /**
     * Get the context limit for a specific model, including variant handling.
     *
     * @param modelName The model name to check
     * @returns Token limit for the model
     */
    private getModelContextLimit;
    /**
     * Estimate token count for text using a heuristic that weights different character types.
     *
     * @param text The text to estimate tokens for
     * @returns Estimated token count
     */
    private estimateTokenCount;
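    // One plausible character-type-aware heuristic (an assumption, not the package's actual weights):
    //
    //   private estimateTokenCount(text: string): number {
    //     let tokens = 0;
    //     for (const ch of text) {
    //       // CJK characters tend to map to roughly one token each;
    //       // ASCII text averages around four characters per token.
    //       tokens += /[\u4e00-\u9fff\u3040-\u30ff]/.test(ch) ? 1 : 0.25;
    //     }
    //     return Math.ceil(tokens);
    //   }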
    /**
     * Split diff content by files using git diff headers.
     *
     * @param diff Complete git diff content
     * @returns Map of file path to diff content
     */
    private splitDiffByFiles;
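    // Sketch of splitting on standard git diff file headers (the "diff --git a/... b/..." lines);
    // quoted paths with spaces are not handled here, and the real implementation may differ:
    //
    //   private splitDiffByFiles(diff: string): Map<string, string> {
    //     const result = new Map<string, string>();
    //     for (const section of diff.split(/^(?=diff --git )/m)) {
    //       const match = section.match(/^diff --git a\/(\S+) b\//);
    //       if (match) result.set(match[1], section);
    //     }
    //     return result;
    //   }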
    /**
     * Split a large file diff into smaller chunks by code blocks.
     *
     * @param fileDiff Single file diff content
     * @param maxTokens Maximum tokens per chunk
     * @returns Array of split diff chunks
     */
    private splitLargeFileDiff;
    /**
     * Group multiple small diffs into batches that don't exceed the token limit.
     *
     * @param fileDiffs Map of file paths to diff content
     * @param maxTokens Maximum tokens per batch
     * @returns Array of diff chunks within token limits
     */
    private groupDiffsWithinLimit;
    /**
     * Create an empty diff chunk with initialized properties.
     *
     * @returns Empty diff chunk object
     */
    private createEmptyDiffChunk;
    /**
     * Generate commit information for a single diff chunk.
     *
     * @param diffChunk Diff chunk content with metadata
     * @param language Language code for generated content
     * @returns Promise resolving to batch generation result
     */
    private generateBatchCommitAndBranch;
    /**
     * Merge results from multiple batch processing operations.
     *
     * @param batchResults Array of batch generation results
     * @param language Language code for generated content
     * @returns Promise resolving to merged final result
     */
    private mergeBatchResults;
    /**
     * Validate that the provided text is in valid git diff format.
     *
     * @param diff The diff content to validate
     * @returns True if the diff appears to be valid
     */
    private isValidDiff;
    /**
     * Calculate reserved tokens based on the model context limit.
     * Reserves space for the system prompt, the response, and a safety buffer.
     *
     * @param contextLimit Total context limit for the model
     * @returns Number of tokens to reserve
     */
    private calculateReservedTokens;
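    // Illustrative reservation math (the fractions are assumptions, not the package's constants):
    //
    //   private calculateReservedTokens(contextLimit: number): number {
    //     const systemPrompt = 1000;                        // rough system-prompt budget
    //     const response = Math.floor(contextLimit * 0.25); // leave room for the model's reply
    //     const safety = Math.floor(contextLimit * 0.05);   // buffer for estimation error
    //     return systemPrompt + response + safety;
    //   }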
    /**
     * Send a request to the OpenAI API with tool support.
     *
     * @param messages Array of messages for the API request
     * @param useTools Whether to include the output_with_json tool (default: true)
     * @returns Promise resolving to the raw response content or parsed tool call result
     */
    private sendOpenAiRequest;
    /**
     * Clean and parse OpenAI response content.
     *
     * @param rawContent Raw response content from OpenAI (could be tool call arguments or regular content)
     * @param errorContext Context string for error logging
     * @returns Parsed JSON object
     */
    private parseOpenAiResponse;
    /**
     * Build the system prompt for commit analysis.
     *
     * @param language Target language for generated content
     * @param contextInfo Optional context information for partial diffs
     * @returns System prompt string
     */
    private buildSystemPrompt;
    /**
     * Build the user prompt for commit analysis.
     *
     * @param contextInfo Optional context information for partial diffs
     * @returns User prompt string
     */
    private buildUserPrompt;
    /**
     * Build the reasoning configuration based on the reasoning parameter.
     * @returns Reasoning configuration object or null
     */
    private buildReasoningConfig;
    /**
     * Get the language display name for the prompt.
     */
    private getLanguageName;
    /**
     * Calculate and log throughput statistics, and cache the data.
     * @param usage OpenAI API usage statistics
     * @param startTime Request start time in milliseconds
     * @param endTime Request end time in milliseconds
     */
    private logThroughputStats;
    /**
     * Get the latest throughput statistics from the most recent API call.
     * @returns The latest throughput stats or null if no calls have been made
     */
    getLastThroughputStats(): ThroughputStats | null;
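    // Retrieval sketch: inspect the most recent call's stats (continues the `svc` example above).
    //
    //   const last = svc.getLastThroughputStats();
    //   if (last) console.log(`${last.model}: ${last.tokensPerSecond.toFixed(1)} tok/s`);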
    /**
     * Get throughput statistics history (up to the last 10 requests)
     * @returns Array of throughput statistics, ordered from oldest to newest
     */
    getThroughputHistory(): ThroughputStats[];
    /**
     * Get aggregated throughput statistics from history
     * @returns Aggregated statistics or null if no history exists
     */
    getAggregatedThroughputStats(): {
        totalRequests: number;
        averageResponseTime: number;
        averageThroughput: number;
        totalTokens: number;
        totalPromptTokens: number;
        totalCompletionTokens: number;
        bestPerformance: ThroughputStats | null;
        worstPerformance: ThroughputStats | null;
        performanceTrend: 'improving' | 'declining' | 'stable' | 'insufficient_data';
    } | null;
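    // Aggregation sketch: summarize recent calls after a batch of requests.
    //
    //   const agg = svc.getAggregatedThroughputStats();
    //   if (agg) console.log(`${agg.totalRequests} requests, trend: ${agg.performanceTrend}`);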
    /**
     * Clear throughput statistics history and cache
     */
    clearThroughputStats(): void;
}
//# sourceMappingURL=openai-service.d.ts.map