toxblock
Version: 1.0.0
A professional TypeScript module that uses Gemini AI to detect profanity and toxic content across multiple languages
/**
* @fileoverview ToxBlock - A professional TypeScript module for detecting profanity and toxic content using Gemini AI
* @version 1.0.0
* @author sw3do
* @license MIT
*/
/**
* Configuration options for ToxBlock
*
* @interface ToxBlockConfig
* @example
* ```typescript
* const config: ToxBlockConfig = {
* apiKey: 'your-gemini-api-key',
* model: 'gemini-2.0-flash-001',
* timeout: 15000
* };
* ```
*/
export interface ToxBlockConfig {
/** Google Gemini API key - required for authentication */
apiKey: string;
/** Model name to use (default: 'gemini-2.0-flash-001') */
model?: string;
/** Custom prompt template for profanity detection - use {TEXT} placeholder */
customPrompt?: string;
/** Timeout for API requests in milliseconds (default: 10000) */
timeout?: number;
}
/**
* Result of profanity detection analysis
*
* @interface ToxBlockResult
* @example
* ```typescript
* const result: ToxBlockResult = {
* isProfane: false,
* confidence: 0.95,
* language: 'en',
* details: 'Clean content detected'
* };
* ```
*/
export interface ToxBlockResult {
/** Whether the text contains profanity, toxic content, or inappropriate language */
isProfane: boolean;
/** Confidence score between 0 and 1 (higher means more confident) */
confidence: number;
/** Detected language code (e.g., 'en', 'es', 'fr') or 'unknown' */
language?: string;
/** Additional details about the detection or error information */
details?: string | undefined;
}
/**
* Error thrown by ToxBlock operations
*
* @class ToxBlockError
* @extends Error
* @example
* ```typescript
* try {
* await toxBlock.checkText(invalidInput);
* } catch (error) {
* if (error instanceof ToxBlockError) {
* console.log('Error code:', error.code);
* console.log('Original error:', error.originalError);
* }
* }
* ```
*/
export declare class ToxBlockError extends Error {
/** Error code for categorization (e.g., 'INVALID_CONFIG', 'ANALYSIS_FAILED') */
readonly code: string;
/** Original error that caused this error, if any */
readonly originalError: Error | undefined;
/**
* Creates a new ToxBlockError instance
*
* @param message - Error message describing what went wrong
* @param code - Error code for categorization and handling
* @param originalError - Original error that caused this error (optional)
*/
constructor(message: string, code: string, originalError?: Error);
}
/**
* ToxBlock - A professional profanity detection module using Gemini AI
*
* Provides comprehensive text analysis for detecting profanity, toxic content,
* hate speech, and inappropriate language across multiple languages.
*
* @class ToxBlock
* @example
* ```typescript
* // Basic usage
* const toxBlock = new ToxBlock({ apiKey: 'your-gemini-api-key' });
* const result = await toxBlock.checkText('Hello world');
* console.log(result.isProfane); // false
* console.log(result.confidence); // 0.95
*
* // Batch processing
* const results = await toxBlock.checkTexts(['Hello', 'Bad word']);
* results.forEach((result, index) => {
* console.log(`Text ${index}: ${result.isProfane ? 'Toxic' : 'Clean'}`);
* });
*
* // Custom configuration
* const customToxBlock = new ToxBlock({
* apiKey: 'your-api-key',
* model: 'gemini-2.0-flash-001',
* timeout: 15000,
* customPrompt: 'Analyze this text: {TEXT}'
* });
* ```
*/
export declare class ToxBlock {
/** Google Gemini AI instance */
private readonly genAI;
/** AI model name to use for analysis */
private readonly model;
/** Custom prompt template for profanity detection */
private readonly customPrompt;
/** Timeout for API requests in milliseconds */
private readonly timeout;
/**
* Default prompt template for profanity detection
*/
private static readonly DEFAULT_PROMPT;
/**
* Creates a new ToxBlock instance
*
* @param config - Configuration options
* @throws {ToxBlockError} When configuration is invalid
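*
* @example
* A minimal construction sketch; the API key string is a placeholder, and the error code shown is the documented 'INVALID_CONFIG' example code:
* ```typescript
* const toxBlock = new ToxBlock({ apiKey: 'your-gemini-api-key' });
*
* // An empty API key is invalid configuration and is expected to throw
* try {
*   const broken = new ToxBlock({ apiKey: '' });
* } catch (error) {
*   if (error instanceof ToxBlockError) {
*     console.error(error.code); // e.g. 'INVALID_CONFIG'
*   }
* }
* ```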
*/
constructor(config: ToxBlockConfig);
/**
* Checks if the provided text contains profanity or toxic content
*
* @param text - The text to analyze
* @returns Promise resolving to detection result
* @throws {ToxBlockError} When analysis fails
*
* @example
* ```typescript
* const result = await toxBlock.checkText('This is a test');
* if (result.isProfane) {
* console.log('Profanity detected!');
* }
* ```
*/
checkText(text: string): Promise<ToxBlockResult>;
/**
* Checks multiple texts in batch
*
* @param texts - Array of texts to analyze
* @returns Promise resolving to array of detection results
* @throws {ToxBlockError} When batch analysis fails
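*
* @example
* A batch-processing sketch; the input strings are illustrative, and each result is assumed to correspond to the input at the same index (as in the class-level example):
* ```typescript
* const results = await toxBlock.checkTexts(['Hello there', 'Another message']);
* results.forEach((result, index) => {
*   console.log(`#${index}: ${result.isProfane ? 'flagged' : 'clean'} (confidence ${result.confidence})`);
* });
* ```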
*/
checkTexts(texts: string[]): Promise<ToxBlockResult[]>;
/**
* Parses the AI response and extracts profanity detection result
*
* @private
* @param responseText - Raw response from Gemini AI
* @returns Parsed detection result
*/
private parseResponse;
/**
* Gets the current configuration
*
* @returns Current model and timeout settings
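*
* @example
* A small sketch of reading back the effective settings; the values in the comments assume the documented defaults:
* ```typescript
* const { model, timeout } = toxBlock.getConfig();
* console.log(model);   // e.g. 'gemini-2.0-flash-001' (default model)
* console.log(timeout); // e.g. 10000 (default timeout in milliseconds)
* ```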
*/
getConfig(): {
model: string;
timeout: number;
};
}
/**
* Default export for convenience
*/
export default ToxBlock;
/**
* Creates a ToxBlock instance with the provided configuration
*
* @param config - Configuration options
* @returns New ToxBlock instance
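*
* @example
* A convenience-factory sketch, assumed equivalent to calling the ToxBlock constructor directly; the API key is a placeholder:
* ```typescript
* const toxBlock = createToxBlock({ apiKey: 'your-gemini-api-key' });
* const result = await toxBlock.checkText('Hello world');
* console.log(result.isProfane, result.confidence);
* ```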
*/
export declare function createToxBlock(config: ToxBlockConfig): ToxBlock;
//# sourceMappingURL=index.d.ts.map