/**
 * @aituber-onair/core
 * Core library for AITuber OnAir providing voice synthesis and chat processing.
 */
import { MODEL_GPT_4O_MINI, MODEL_GPT_4O, MODEL_O3_MINI, VISION_SUPPORTED_MODELS } from '../../../constants';
import { OpenAIChatService } from '../OpenAIChatService';
/**
* OpenAI API provider implementation
*/
/**
 * OpenAI API provider implementation.
 *
 * Creates OpenAIChatService instances and reports provider capabilities
 * (provider name, supported models, default model, vision support).
 */
export class OpenAIChatServiceProvider {
    /**
     * Create a chat service instance.
     *
     * Vision model resolution order:
     *   1. `options.visionModel` when explicitly provided;
     *   2. the resolved chat model itself, when it supports vision;
     *   3. `MODEL_GPT_4O_MINI` as a vision-capable fallback.
     *
     * @param options Service options ({ apiKey, model?, visionModel? })
     * @returns OpenAIChatService instance
     */
    createChatService(options) {
        // Resolve the chat model once, BEFORE deriving the vision model.
        // The previous code checked vision support against the default model
        // but then returned `options.model` from the ternary, so visionModel
        // ended up `undefined` whenever the caller omitted `model`.
        const model = options.model ?? this.getDefaultModel();
        // `??` (not `||`) so only null/undefined trigger the fallback.
        const visionModel = options.visionModel ??
            (this.supportsVisionForModel(model) ? model : MODEL_GPT_4O_MINI);
        // Pass the caller's raw `model` through unchanged so the service's
        // own default-model handling is preserved when it was omitted.
        return new OpenAIChatService(options.apiKey, options.model, visionModel);
    }
    /**
     * Get the provider name.
     * @returns Provider name ('openai')
     */
    getProviderName() {
        return 'openai';
    }
    /**
     * Get the list of supported models.
     * @returns Array of supported model names
     */
    getSupportedModels() {
        return [MODEL_GPT_4O_MINI, MODEL_GPT_4O, MODEL_O3_MINI];
    }
    /**
     * Get the default model.
     * @returns Default model name
     */
    getDefaultModel() {
        return MODEL_GPT_4O_MINI;
    }
    /**
     * Check if this provider supports vision (image processing).
     * @returns Vision support status (always true for this provider)
     */
    supportsVision() {
        return true;
    }
    /**
     * Check if a specific model supports vision capabilities.
     * @param model The model name to check
     * @returns True if the model is in VISION_SUPPORTED_MODELS
     */
    supportsVisionForModel(model) {
        return VISION_SUPPORTED_MODELS.includes(model);
    }
}
//# sourceMappingURL=OpenAIChatServiceProvider.js.map