devibe
Intelligent repository cleanup with auto mode, AI learning, markdown consolidation, auto-consolidate workflow, context-aware classification, and cost optimization
/**
* AI Batch Optimizer
*
* Intelligent batching strategy to maximize files per API call
* while staying within context window limits.
*/
export interface FileInfo {
path: string;
name: string;
size: number;
content: string;
}
export interface BatchGroup {
files: FileWithSample[];
estimatedInputTokens: number;
estimatedOutputTokens: number;
tier: 'micro' | 'small' | 'medium' | 'large';
}
export interface FileWithSample {
path: string;
name: string;
size: number;
contentSample: string;
samplingStrategy: string;
}
import type { ModelConfig } from './ai-model-config.js';
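/**
 * Default batching constants. Field meanings are inferred from their names and
 * from how BatchConfig consumes them: CHARS_PER_TOKEN is the character-to-token
 * heuristic, BASE_PROMPT_TOKENS and the PER_FILE_* values are fixed overheads
 * added to each estimate, SAFETY_MARGIN_PERCENT reserves headroom below the
 * context window, and TIERS maps file-size buckets to how many content
 * characters are sampled per file.
 */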
declare const DEFAULT_BATCH_CONFIG: {
CHARS_PER_TOKEN: number;
BASE_PROMPT_TOKENS: number;
PER_FILE_OVERHEAD_TOKENS: number;
PER_FILE_RESPONSE_TOKENS: number;
SAFETY_MARGIN_PERCENT: number;
TIERS: {
micro: {
maxSize: number;
contentChars: number;
name: string;
};
small: {
maxSize: number;
contentChars: number;
name: string;
};
medium: {
maxSize: number;
contentChars: number;
name: string;
};
large: {
maxSize: number;
contentChars: number;
name: string;
};
};
};
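/**
 * Resolved batching limits for a specific model (see createBatchConfig below).
 * maxContextTokens and targetInputTokens are presumably derived from the
 * model's context window; the remaining fields mirror DEFAULT_BATCH_CONFIG.
 */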
export interface BatchConfig {
maxContextTokens: number;
targetInputTokens: number;
safetyMargin: number;
charsPerToken: number;
basePromptTokens: number;
perFileOverheadTokens: number;
perFileResponseTokens: number;
tiers: typeof DEFAULT_BATCH_CONFIG.TIERS;
}
/**
* Create model-specific batch configuration
*/
export declare function createBatchConfig(model: ModelConfig): BatchConfig;
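/*
 * createBatchConfig usage (illustrative sketch; the ModelConfig value is
 * assumed to come from ./ai-model-config.js and is not constructed here):
 *
 *   const config = createBatchConfig(model);
 *   // config caps input tokens below the model's context window, keeping
 *   // the configured safety margin as headroom.
 */
/**
 * Exported batching constants; same shape as DEFAULT_BATCH_CONFIG above.
 */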
export declare const BATCH_CONFIG: {
CHARS_PER_TOKEN: number;
BASE_PROMPT_TOKENS: number;
PER_FILE_OVERHEAD_TOKENS: number;
PER_FILE_RESPONSE_TOKENS: number;
SAFETY_MARGIN_PERCENT: number;
TIERS: {
micro: {
maxSize: number;
contentChars: number;
name: string;
};
small: {
maxSize: number;
contentChars: number;
name: string;
};
medium: {
maxSize: number;
contentChars: number;
name: string;
};
large: {
maxSize: number;
contentChars: number;
name: string;
};
};
};
export declare class AIBatchOptimizer {
private model?;
private config;
constructor(model?: ModelConfig | undefined);
/**
* Conservatively estimate the token count of a string
*/
private estimateTokens;
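/*
 * A plausible estimation model, given the config field names (a sketch, not
 * the verified implementation):
 *
 *   inputTokens  ~= basePromptTokens
 *                 + sum over files of (contentChars / charsPerToken + perFileOverheadTokens)
 *   outputTokens ~= fileCount * perFileResponseTokens
 *
 * with the safety margin keeping totals under targetInputTokens.
 */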
/**
* Determine optimal content sampling for a file
*/
private sampleFileContent;
/**
* Calculate optimal batches for maximum throughput
* Uses model-specific context window limits for intelligent packing
*/
createOptimalBatches(files: FileInfo[]): BatchGroup[];
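/*
 * Example (illustrative sketch; paths, sizes, and content are hypothetical):
 *
 *   const optimizer = new AIBatchOptimizer(model);
 *   const batches = optimizer.createOptimalBatches([
 *     { path: 'scripts/build.js', name: 'build.js', size: 1834, content: '...' },
 *     { path: 'notes/todo.md', name: 'todo.md', size: 412, content: '...' },
 *   ]);
 *   // Files are grouped by size tier and packed so each batch stays within
 *   // the model's context window.
 */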
/**
* Build a compact, token-efficient prompt
*/
buildCompactPrompt(batch: BatchGroup, repositories: string[]): string;
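/*
 * Example (illustrative sketch; repository names are hypothetical):
 *
 *   for (const batch of batches) {
 *     const prompt = optimizer.buildCompactPrompt(batch, ['web-app', 'api-server']);
 *     // One prompt covers every sampled file in the batch, so the whole
 *     // batch is classified in a single API call.
 *   }
 */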
/**
* Get statistics about batching efficiency
*/
getBatchStats(batches: BatchGroup[]): {
totalFiles: number;
totalBatches: number;
avgFilesPerBatch: number;
avgInputTokens: number;
avgOutputTokens: number;
estimatedCost: number;
byTier: Record<string, number>;
};
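/*
 * Example (illustrative sketch):
 *
 *   const stats = optimizer.getBatchStats(batches);
 *   console.log(`${stats.totalFiles} files in ${stats.totalBatches} calls, ` +
 *     `~$${stats.estimatedCost.toFixed(4)} estimated`);
 */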
/**
* Validate that batches fit within limits
*/
validateBatches(batches: BatchGroup[]): {
valid: boolean;
issues: string[];
};
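/*
 * Example (illustrative sketch):
 *
 *   const { valid, issues } = optimizer.validateBatches(batches);
 *   if (!valid) {
 *     console.warn('Batch limits exceeded:', issues.join('; '));
 *   }
 */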
/**
* Get model info for reporting
*/
getModelInfo(): {
name: string;
contextWindow: number;
} | null;
/**
* Calculate efficiency metrics
*/
calculateEfficiency(batches: BatchGroup[]): {
contextUtilization: number;
avgBatchSize: number;
totalApiCalls: number;
estimatedTimeSeconds: number;
};
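/*
 * Example (illustrative sketch):
 *
 *   const eff = optimizer.calculateEfficiency(batches);
 *   // eff.contextUtilization: share of each context window actually used;
 *   // eff.totalApiCalls: number of requests the batching plan requires.
 */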
}
export {};
//# sourceMappingURL=ai-batch-optimizer.d.ts.map