define(['mmirf/util/deferred', 'mmirf/util/extend', 'mmirf/util/isArray', 'mmirf/resources', 'mmirf/configurationManager', 'mmirf/logger', 'mmirf/events', 'module'],
/**
* The MediaManager gives access to audio input and output functionality.
*
* Depending on its configuration, the MediaManager loads different implementation modules
* (<em>plugins</em>) that realize the interface-functions differently.
*
* See directory <code>mmirf/env/media</code> for available plugins.
*
* This "class" is a singleton - so that only one instance is in use.<br>
*
* @class
* @name MediaManager
* @memberOf mmir
* @static
* @hideconstructor
*
* TODO remove / change dependency on forBrowser: res.isBrowserEnv()!!!
*/
function(
deferred, extend, isArray, res, configurationManager, Logger, EventEmitter, module
){
//the next comment enables JSDoc2 to map all functions etc. to the correct class description
/** @scope mmir.MediaManager.prototype */
/**
* The instance that holds the singleton MediaManager object.
* @private
* @type MediaManager
* @memberOf MediaManager#
*/
var instance = null;
/**
* The logger for the MediaManager.
*
* Exported as <code>_log</code> by the MediaManager instance.
*
* @private
* @memberOf MediaManager#
*/
var logger = Logger.create(module);//initialize with requirejs-module information
/**
* HELPER get list of required plugins for an environment
*
* supported environments: <default> | 'cordova'
*
* TODO do we need to differentiate between more environments?
*
* @param {Boolean} isCordovaEnv TRUE for cordova-environments, otherwise FALSE
* @return {Array<PluginEntry>} the list of required PluginEntry objects for the env
*
* @private
* @memberOf mmir.MediaManager#
*/
function getRequiredPlugins(isCordovaEnv){
return isCordovaEnv? [{mod: 'cordovaAudio', type: 'audio'}] : [{mod: 'webAudio', type: 'audio'}];
}
/**
* default configuration for env-settings "browser" and "cordova":
*
* -> may be overwritten by settings in the configuration file.
* e.g. adding the following JSON data to config/configuration.json:
* <pre>
* "mediaManager": {
* "plugins": {
* "browser": ["webAudio",
* "webspeechAudioInput",
* {"mod": "audiotts", "config": "ttsMary", "type": "tts"},
* {"mod": "webspeechAudioInput", "type": "asr", "ctx": "chrome"}
* ],
* "cordova": ["cordovaAudio",
* "mmir-plugin-speech-nuance",
* "mmir-plugin-speech-nuance/ttsAndroid",
* {"mod": "mmir-plugin-speech-android", "type": "asr", "ctx": "native"},
* {"mod": "mmir-plugin-speech-android/ttsAndroid", "type": "tts", "ctx": "native"},
* {"mod": "audiotts", "config": "ttsMary", "type": "tts", "ctx": "web"}
* ]
* }
* }
* </pre>
*
* @private
* @type PlainObject
*
* @memberOf MediaManager#
*/
var _defaultPlugins = {
'browser': getRequiredPlugins(false).concat([
{mod: 'webspeechAudioInput', type: 'asr'},
{mod: 'audiotts', config: 'ttsMary', type: 'tts'}
]),
'cordova': getRequiredPlugins(true).concat([
{mod: 'asrAndroid', type: 'asr'},
{mod: 'audiotts', config: 'ttsMary', type: 'tts'}
])
};
/**
* Mapping for modules to default module configurations.
*
* This is mainly used for backwards compatibility, to map deprecated modules to their
* new/corresponding configuration.
*
* Maps a module name/file to the corresponding (new) module configuration.
*
* NOTE: The module names/files are in lower case.
*
* TODO extract to loadable migration module
*
* @private
* @type PlainObject
*
* @memberOf MediaManager#
*/
var _pluginsConfig = {
'marytexttospeech': {mod: 'audiotts', config: 'ttsMary', type: 'tts'},
'html5audioinput': {mod: 'webAudioInput', config: 'asrGoogleXhr', type: 'asr'},
'webkitaudioinput': {mod: 'webspeechAudioInput', type: 'asr'},
'html5audiooutput': {mod: 'webAudio', type: 'audio'},
'cordovaaudiooutput': {mod: 'cordovaAudio', type: 'audio'},
'webaudiotexttospeech': {mod: 'audiotts', config: 'ttsMary', type: 'tts'}
};
/**
* Mapping for modules' config:
*
* This is used for backwards compatibility, to map deprecated module config fields to their
* new/corresponding configuration.
*
* NOTE: The config names/files are in lower case.
*
* TODO extract to loadable migration module
*
* @private
* @type PlainObject
*
* @memberOf MediaManager#
*/
var _pluginsConfigConfig = {
'webttsmaryimpl': 'ttsMary',
// 'webttsnuanceimpl': 'ttsNuanceXhr',
// 'ttsspeakJsimpl': 'ttsSpeakjs',
// 'webasrgoogleimpl': 'asrGoogleXhr',
// 'webasrnuanceimpl': 'asrNuanceXhr',
// 'webasrnuancewsimpl': 'asrNuanceWs',
};
/**
* HELPER create a non-functional stub-function for a non-functional module/plugin:
* the stub tries to discern the error-callback from its invocation arguments
* and triggers it with a message (or logs an error)
*
* @param {String} mod the module/plugin name
* @param {String} funcName the function name (of the function that is not-functional/mocked)
* @param {String} [message] OPTIONAL an additional message text for the error callback (or error logging)
* @param {String} [optionsErrorCallbackName] OPTIONAL the name in the options (i.e. args-argument) for the error callback (default is 'error'); NOTE: positional argument
* @return {Function} function that triggers the error callback: function([args], onSuccess, onError)
* where args is optional:
* if onError is undefined, and args.error is a function, then args.error is used as error callback
* if there is no error callback, the error will be logged
*
* @private
* @function
*
* @memberOf MediaManager#
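*
* @example
* //example (sketch; the module/function names are only illustrative):
* var ttsStub = createNonFunctional('audiotts', 'tts', 'Plugin could not be initialized.');
* ttsStub({text: 'hello world', error: function(err){ console.error(err); }});
* //-> invokes the error callback with "audiotts.tts() is disabled. Plugin could not be initialized."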
*/
var createNonFunctional = function(mod, funcName, message, optionsErrorCallbackName){
return function _nonFunctional(args, onSuccess, onError){
optionsErrorCallbackName = optionsErrorCallbackName || 'error';
if(typeof args === 'function'){
onError = onSuccess;
onSuccess = args;
args = void(0);
}
if(typeof onError !== 'function' && args && typeof args[optionsErrorCallbackName] === 'function'){
onError = args[optionsErrorCallbackName];
}
var msg = mod + '.' + funcName + '() is disabled.' + (message? ' ' + message : '');
if(typeof onError === 'function'){
onError(msg);
} else {
logger.error(mod, funcName, msg);
}
}
};
/**
* Load an media-module implementation from plugin file.
*
* @param {String} filePath
* the file-name of the media-module that will be loaded.
* The file needs to be located in {@link mmir.Resources#getMediaPluginPath}.
* If filePath does not end with suffix ".js", it will be added, before
* loading the file.
* @param {Function} successCallback
* invoked with function(filePath, exportedFunctions, nonFunctional, pluginIndex) upon successfully loading the plugin.
* @param {Function} failureCallback
* @param {String} [execId]
* the context-ID into which the implementation of the media-module will be loaded.
* If omitted or FALSY, the default context will be used.
* NOTE: positional argument
* @param {any} [config]
* a configuration value that will be passed to the media-module upon its initialization
* NOTE: positional argument
* @param {number} pluginIndex
* the (zero-based) index of the plugin within the plugin loading list
*
* @private
* @function
*
* @memberOf MediaManager#
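*
* @example
* //example (sketch; internal usage, the plugin name is only illustrative):
* loadPlugin('webAudio', function(path, exportedFunctions){
* logger.info('loaded media plugin ' + path);
* }, function(err){
* logger.error('could not load media plugin: ' + err);
* }, void(0), void(0), 0);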
*/
var loadPlugin = function loadPlugin(filePath, successCallback, failureCallback, execId, config, pluginIndex){
try {
if((typeof WEBPACK_BUILD === 'undefined' || !WEBPACK_BUILD) && !/\.js$/i.test(filePath)){
filePath += '.js';
}
var processLoaded = function(newMediaPlugin){
/**
* callback handler that is invoked by the loaded media plugin after initialization
*
* @param {PlainObject} exportedFunctions dictionary with the exported functions / fields
* @param {promise | mmir.interface.DisabledPluginInfo} [nonFunctional] OPTIONAL
* OPTIONAL argument that is supplied in case the plugin is not functional:
* if it is promise-like (i.e. then()-able), it is resolved, before continuing with its result as argument for nonFunctional
* if nonFunctional.disabled is a list, corresponding function stubs will be created in exportedFunctions
*/
var onInitialized = function(exportedFunctions, nonFunctional){// nonFunction: {disabled: boolean | string[] | {[func: string]: Function}, mod?: string, message?: string, errorCallbackName?: string} | promise
if(nonFunctional){
// if nonFunctional is promise-like: resolve promise and re-invoke with the result
if(typeof nonFunctional.then === 'function'){
if(typeof nonFunctional.catch === 'function'){
nonFunctional.catch(function(err){
logger.warn('Error resolving non-functional promise when loading MediaPlugin '+filePath+': '+err, err);
onInitialized(exportedFunctions, true);
});
}
nonFunctional.then(function(res){
onInitialized(exportedFunctions, res);
});
return; ////////////////// EARLY EXIT //////////////////////
}
if(nonFunctional.disabled) {
// create function-stubs for non-functional (function-name-) list:
if(isArray(nonFunctional.disabled)){
var nonFuncMod = nonFunctional.mod || filePath;
var nonFuncMsg = nonFunctional.message || '';
exportedFunctions = exportedFunctions || {};
var list = nonFunctional.disabled, name;
for(var i=0,size=list.length; i < size; ++i){
name = list[i];
exportedFunctions[name] = createNonFunctional(nonFuncMod, name, nonFuncMsg, nonFunctional.errorCallbackName);
}
} else if(typeof nonFunctional.disabled === 'object'){
// -> apply function definitions from nonFunctional.disabled to exportedFunctions
exportedFunctions = exportedFunctions || {};
var disabledFuncs = nonFunctional.disabled;
for(var fname in disabledFuncs){
exportedFunctions[fname] = disabledFuncs[fname];
}
}
}
}
if(execId){
//create new "execution context" if necessary
if(typeof instance.ctx[execId] === 'undefined'){
instance.ctx[execId] = {};
}
//import functions and properties into execution-context:
var func;
for(var p in exportedFunctions){
if(exportedFunctions.hasOwnProperty(p)){
//only allow extension of the execution-context, no overwriting:
if(typeof instance.ctx[execId][p] === 'undefined'){
func = exportedFunctions[p];
if(typeof func === 'function'){
//need to "re-map" the execution context for the functions,
// so that "they think" they are actually executed within the MediaManager instance
(function(mediaManagerInstance, originalFunc, name, context, ttsFieldExists){
//NOTE need closure to "preserve" values of for-iteration
mediaManagerInstance.ctx[context][name] = function(){
// logger.log('executing '+context+'.'+name+', in context '+mediaManagerInstance,mediaManagerInstance);//DEBUG
return originalFunc.apply(mediaManagerInstance, arguments);
};
//add alias 'tts' for 'textToSpeech'
if(!ttsFieldExists && name === 'textToSpeech'){
logger.error('outdated TTS plugin '+filePath+': plugin implementation should replace textToSpeech() with tts()!');
mediaManagerInstance.ctx[context]['tts'] = mediaManagerInstance.ctx[context]['textToSpeech'];
}
})(instance, func, p, execId, exportedFunctions['tts']);
}
else {
//for non-functions: just attach to the new "sub-context"
instance.ctx[execId][p] = func;
}
} else {
//if there already is a function/property for this in the execution-context,
// print out an error:
logger.error('MediaManager', 'loadPlugin',
'cannot load implementation for '+p+' of plugin "'+filePath+
'" into execution-context "'+execId+
'": context already exists!'
);
}
}//END if(exportedFunctions<own>)
}//END for(p in exportedFunctions)
}//END if(execId)
else {
extend(instance,exportedFunctions);
//add alias 'tts' for 'textToSpeech'
if(typeof exportedFunctions['textToSpeech'] === 'function' && !exportedFunctions['tts']){
logger.error('outdated TTS plugin '+filePath+': plugin implementation should replace textToSpeech() with tts()!');
instance['tts'] = exportedFunctions['textToSpeech'];
}
}
if (successCallback) successCallback(filePath, exportedFunctions, nonFunctional, pluginIndex);
};//END: var onInitialized = function(exportedFunctions, ...
newMediaPlugin.initialize(onInitialized, execId, config);
};//END: var processLoaded = function(newMediaPlugin){...
if(typeof WEBPACK_BUILD !== 'undefined' && WEBPACK_BUILD){
var modResult;
filePath = filePath.replace(/\.js$/i, '');
try {
//TODO convert file-URLs to alias/module IDs and only use __webpack_require__ (& create include list when building from configuration.json)
modResult = require('../env/media/'+filePath);
} catch(err){
//load filePath "raw" as module ID:
modResult = __webpack_require__(filePath);
}
processLoaded(modResult);
} else {
require([res.getMediaPluginPath() + filePath], processLoaded, function(_err){
//try filePath as module ID instead:
var moduleId = filePath.replace(/\.js$/i, '');
if(logger.isd()) logger.debug('failed loading plugin from file '+(res.getMediaPluginPath() + filePath)+', trying module ID ' + moduleId);
require([moduleId], processLoaded, failureCallback);
});
}
} catch (e){
logger.error('Error loading MediaPlugin '+filePath+': '+e, e);
if (failureCallback) failureCallback();
}
};
/**
* @constructs MediaManager
* @memberOf MediaManager.prototype
* @private
* @ignore
*/
function constructor(){
/**
* event emitter / manager for media events
*
* @private
* @type mmir.tools.EventEmitter
* @memberOf MediaManager.prototype
*/
var listener = new EventEmitter(null);
/**
* event emitter / manager of listener-observers:
* observers get notified if a listener for event X gets added/removed
*
* @private
* @type mmir.tools.EventEmitter
* @memberOf MediaManager.prototype
*/
var listenerObserver = new EventEmitter(null);
/**
* exported as addListener() and on()
*
* @private
* @memberOf MediaManager.prototype
*/
var addListenerImpl = function(eventName, eventHandler){
if(listener.on(eventName, eventHandler)){
//notify listener-observers for this event-type
this._notifyObservers(eventName, 'added', eventHandler);
}
};
/**
* exported as removeListener() and off()
*
* @private
* @memberOf MediaManager.prototype
*/
var removeListenerImpl = function(eventName, eventHandler){
if(listener.off(eventName, eventHandler)){
//notify listener-observers for this event-type
this._notifyObservers(eventName, 'removed', eventHandler);
return true;
}
return false;
};
/**
* Default execution context for functions:
*
* if set (i.e. not <code>falsy</code>), then functions will be executed in this context by default.
*
* @private
* @type String
* @memberOf MediaManager.prototype
*/
var defaultExecId = void(0);
/** @lends mmir.MediaManager.prototype */
return {
/**
* A logger for the MediaManager and its plugins/modules.
*
* <p>
* This logger MAY be used by media-plugins and / or tools and helpers
* related to the MediaManager.
*
* <p>
* This logger SHOULD NOT be used by "code" that is not related to the
* MediaManager.
*
* @name _log
* @type mmir.tools.Logger
* @default mmir.Logger (logger instance for mmir.MediaManager)
* @public
*
* @memberOf mmir.MediaManager#
*/
_log: logger,
/**
* Execution context for plugins:
*
* dictionary for non-default execution contexts (as specified via plugin configuration "ctx")
*
* @name ctx
* @type Object
* @default Object (empty context, i.e. plugins are loaded into the "root context", and no plugins are loaded into a non-default execution context)
* @public
*
* @memberOf mmir.MediaManager#
*/
ctx: {},
/**
* List of loaded media plugins
* <pre>{mod: <module/plugin name or file or id>, type: "asr" | "tts" | "audio" | "custom", config?: any, disabled?: boolean | NonFunctionalInfo}</pre>
*
* @name plugins
* @type Array<PluginLoadConfig>
* @public
*
* @memberOf mmir.MediaManager#
*/
plugins: null,
/**
* Wait indicator, e.g. for speech input:
* <p>
* provides 2 functions:<br>
*
* <code>preparing()</code>: if called, the implementation indicates that the "user should wait"<br>
* <code>ready()</code>: if called, the implementation stops indicating that the "user should wait" (i.e. that the system is ready for user input now)<br>
*
* <p>
* If not set (or the functions are not available), calls will do nothing.
*
* @type mmir.env.media.IWaitReadyIndicator
* @memberOf mmir.MediaManager#
*
* @default Object (no implementation set)
*
* @see #_preparing
* @see #_ready
*
*
* @example
* //define custom wait/ready implementation:
* var impl = {
* preparing: function(str){
* console.log('Media module '+str+' is preparing...');
* },
* ready: function(str){
* console.log('Media module '+str+' is ready now!');
* }
* };
*
* //configure MediaManager to use custom implementation:
* mmir.MediaManager.waitReadyImpl = impl;
*
* //-> now plugins that call mmir.MediaManager._preparing() and mmir.MediaManager._ready()
* // will invoke the custom implementation's functions.
*/
waitReadyImpl: {},
//... these are the standard audioInput procedures that should be implemented by a loaded module/file:
///////////////////////////// audio input API: /////////////////////////////
/**
* Start speech recognition with <em>end-of-speech</em> detection:
*
* the recognizer automatically tries to detect when speech has finished and
* triggers the status-callback accordingly with results.
*
* @async
*
* @param {PlainObject} [options] OPTIONAL
* options for Automatic Speech Recognition:
* <pre>{
* success: OPTIONAL Function, the status-callback (see arg statusCallback)
* , error: OPTIONAL Function, the error callback (see arg failureCallback)
* , language: OPTIONAL String, the language for recognition (if omitted, the current language setting is used)
* , intermediate: OPTIONAL Boolean, set true for receiving intermediate results (NOTE not all ASR engines may support intermediate results)
* , results: OPTIONAL Number, set how many recognition alternatives should be returned at most (NOTE not all ASR engines may support this option)
* , mode: OPTIONAL "search" | "dictation", set the recognition mode (NOTE not all ASR engines may support this option)
* , eosPause: OPTIONAL "short" | "long", length of pause after speech for end-of-speech detection (NOTE not all ASR engines may support this option)
* , disableImprovedFeedback: OPTIONAL Boolean, disable improved feedback when using intermediate results (NOTE not all ASR engines may support this option)
* }</pre>
*
* @param {Function} [statusCallback] OPTIONAL
* callback function that is triggered when recognition starts, text results become available, and recognition ends.
* The callback signature is:
* <pre>
* callback(
* text: String | "",
* confidence: Number | Void,
* status: "FINAL"|"INTERIM"|"INTERMEDIATE"|"RECORDING_BEGIN"|"RECORDING_DONE",
* alternatives: Array<{result: String, score: Number}> | Void,
* unstable: String | Void,
* custom: any | Void
* )
* </pre>
*
* Usually, for status <code>"FINAL" | "INTERIM" | "INTERMEDIATE"</code> text results are returned, where
* <pre>
* "INTERIM": an interim result, that might still change
* "INTERMEDIATE": a stable, intermediate result
* "FINAL": a (stable) final result, before the recognition stops
* </pre>
* If present, the <code>unstable</code> argument provides a preview for the currently processed / recognized text.
*
* The <code>custom</code> argument is dependent on the ASR engine / plugin: specific implementations may return some custom results.
*
* <br>NOTE that when using <code>intermediate</code> mode, status-calls with <code>"INTERMEDIATE"</code> may
* contain "final intermediate" results, too.
*
* <br>NOTE: if used in combination with <code>options.success</code>, this argument will supersede the options
*
* @param {Function} [failureCallback] OPTIONAL
* callback function that is triggered when an error occurred.
* The callback signature is:
* <code>callback(error)</code>
*
* <br>NOTE: if used in combination with <code>options.error</code>, this argument will supersede the options
*
*
* @memberOf mmir.MediaManager#
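*
* @example
* //example (sketch): basic recognition with end-of-speech detection;
* //assumes an ASR plugin (e.g. webspeechAudioInput) has been loaded
* mmir.MediaManager.recognize({language: 'en'}, function(text, confidence, status){
* if(status === 'FINAL'){
* console.log('recognized: ' + text);
* }
* }, function(err){
* console.error('ASR error: ' + err);
* });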
*/
recognize: function(options, statusCallback, failureCallback){
if(typeof options === 'function'){
failureCallback = statusCallback;
statusCallback = options;
options = void(0);
}
var funcName = 'recognize';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback || (failureCallback = (options && options.error))){
failureCallback("Audio Input: Speech Recognition is not supported.");
}
else {
logger.error("Audio Input: Speech Recognition is not supported.");
}
},
/**
* Start continuous speech recognition:
*
* The recognizer continues until {@link #stopRecord} is called.
*
* @async
*
* @param {PlainObject} [options] OPTIONAL
* options for Automatic Speech Recognition:
* <pre>{
* success: OPTIONAL Function, the status-callback (see arg statusCallback)
* , error: OPTIONAL Function, the error callback (see arg failureCallback)
* , language: OPTIONAL String, the language for recognition (if omitted, the current language setting is used)
* , intermediate: OPTIONAL Boolean, set true for receiving intermediate results (NOTE not all ASR engines may support intermediate results)
* , results: OPTIONAL Number, set how many recognition alternatives should be returned at most (NOTE not all ASR engines may support this option)
* , mode: OPTIONAL "search" | "dictation", set the recognition mode (NOTE not all ASR engines may support this option)
* , eosPause: OPTIONAL "short" | "long", length of pause after speech for end-of-speech detection (NOTE not all ASR engines may support this option)
* , disableImprovedFeedback: OPTIONAL Boolean, disable improved feedback when using intermediate results (NOTE not all ASR engines may support this option)
* }</pre>
*
* @param {Function} [statusCallback] OPTIONAL
* callback function that is triggered when recognition starts, text results become available, and recognition ends.
* The callback signature is:
* <pre>
* callback(
* text: String | "",
* confidence: Number | Void,
* status: "FINAL"|"INTERIM"|"INTERMEDIATE"|"RECORDING_BEGIN"|"RECORDING_DONE",
* alternatives: Array<{result: String, score: Number}> | Void,
* unstable: String | Void,
* custom: any | Void
* )
* </pre>
*
* Usually, for status <code>"FINAL" | "INTERIM" | "INTERMEDIATE"</code> text results are returned, where
* <pre>
* "INTERIM": an interim result, that might still change
* "INTERMEDIATE": a stable, intermediate result
* "FINAL": a (stable) final result, before the recognition stops
* </pre>
* If present, the <code>unstable</code> argument provides a preview for the currently processed / recognized text.
*
* The <code>custom</code> argument is dependent on the ASR engine / plugin: specific implementations may return some custom results.
*
* <br>NOTE that when using <code>intermediate</code> mode, status-calls with <code>"INTERMEDIATE"</code> may
* contain "final intermediate" results, too.
*
* <br>NOTE: if used in combination with <code>options.success</code>, this argument will supersede the options
*
* @param {Function} [failureCallback] OPTIONAL
* callback function that is triggered when an error occurred.
* The callback signature is:
* <code>callback(error)</code>
*
* <br>NOTE: if used in combination with <code>options.error</code>, this argument will supersede the options
*
* @see #stopRecord
* @memberOf mmir.MediaManager#
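*
* @example
* //example (sketch): continuous recognition with intermediate results;
* //assumes an ASR plugin with recording support has been loaded
* mmir.MediaManager.startRecord({intermediate: true}, function(text, confidence, status){
* console.log(status + ': ' + text);
* }, function(err){
* console.error('ASR error: ' + err);
* });
* //... later: stop recording (the last result is delivered via the status-callback)
* mmir.MediaManager.stopRecord();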
*/
startRecord: function(options, statusCallback, failureCallback, isWithIntermediateResults){//TODO remove arg isWithIntermediateResults -> deprecated: use options instead
if(typeof options === 'function'){
isWithIntermediateResults = failureCallback;
failureCallback = statusCallback;
statusCallback = options;
options = void(0);
}
var funcName = 'startRecord';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback || (failureCallback = (options && options.error))){
failureCallback("Audio Input: Speech Recognition (recording) is not supported.");
}
else {
logger.error("Audio Input: Speech Recognition (recording) is not supported.");
}
},
/**
* Stops continuous speech recognition:
*
* After {@link #startRecord} was called, invoking this function will stop the recognition
* process and return the result by invoking the <code>statusCallback</code>.
*
* Note that the <code>statusCallback</code> may not return an actual text result (i.e. the last
* text result may have been returned in the <code>statusCallback</code> of the <code>startRecord()</code> call)
*
* @async
*
* @param {PlainObject} [options] OPTIONAL
* options for stopping the Automatic Speech Recognition:
* <pre>{
* success: OPTIONAL Function, the status-callback (see arg statusCallback)
* , error: OPTIONAL Function, the error callback (see arg failureCallback)
* }</pre>
*
*
* @param {Function} [statusCallback] OPTIONAL
* callback function that is triggered when recognition starts, text results become available, and recognition ends.
* The callback signature is:
* <pre>
* callback(
* text: String | "",
* confidence: Number | Void,
* status: "FINAL"|"INTERIM"|"INTERMEDIATE"|"RECORDING_BEGIN"|"RECORDING_DONE",
* alternatives: Array<{result: String, score: Number}> | Void,
* unstable: String | Void,
* custom: any | Void
* )
* </pre>
*
* Usually, for status <code>"FINAL" | "INTERIM" | "INTERMEDIATE"</code> text results are returned, where
* <pre>
* "INTERIM": an interim result, that might still change
* "INTERMEDIATE": a stable, intermediate result
* "FINAL": a (stable) final result, before the recognition stops
* </pre>
* If present, the <code>unstable</code> argument provides a preview for the currently processed / recognized text.
*
* The <code>custom</code> argument is dependent on the ASR engine / plugin: specific implementations may return some custom results.
*
* <br>NOTE that when using <code>intermediate</code> mode (as option in <code>startRecord()</code>),
* status-calls with <code>"INTERMEDIATE"</code> may contain "final intermediate" results, too.
*
* @param {Function} [failureCallback] OPTIONAL
* callback function that is triggered when an error occurred.
* The callback signature is:
* <code>callback(error)</code>
*
*
* @see #startRecord
* @memberOf mmir.MediaManager#
*/
stopRecord: function(options, statusCallback, failureCallback){
if(typeof options === 'function'){
failureCallback = statusCallback;
statusCallback = options;
options = void(0);
}
var funcName = 'stopRecord';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Input: Speech Recognition (recording) is not supported.");
}
else {
logger.error("Audio Input: Speech Recognition (recording) is not supported.");
}
},
/**
* Cancel currently active speech recognition.
*
* Has no effect, if no recognition is active.
*
* @memberOf mmir.MediaManager#
*/
cancelRecognition: function(successCallback,failureCallback){
var funcName = 'cancelRecognition';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Input: canceling Recognize Speech is not supported.");
}
else {
logger.error("Audio Input: canceling Recognize Speech is not supported.");
}
},
///////////////////////////// ADDITIONAL (optional) ASR functions: /////////////////////////////
/**
* get list of supported languages for ASR (may not be supported by all plugins).
*
* @memberOf mmir.MediaManager#
*/
getRecognitionLanguages: function(successCallback,failureCallback){
var funcName = 'getRecognitionLanguages';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Input: retrieving list of available languages not supported.");
}
else {
logger.error("Audio Input: retrieving list of available languages not supported.");
}
},
/**
* Destroy the speech recognition instance and free up system resources.
*
* NOTE: may not be supported by all recognition implementations
* (e.g. if the impl. does not block system resources etc).
*
* NOTE: If it is not supported, <code>successCallback(false)</code> is triggered.
*
* IMPORTANT: plugins that support destroyRecognition() should also support initializeRecognition().
*
* @public
* @memberOf mmir.MediaManager#
*
* @param {Function} [successCallback] callback in case of success: <pre>successCallback(didDestroy: boolean)</pre>
* in case the plugin does not support destroyRecognition(),
* <code>successCallback(false)</code> will be invoked
* @param {Function} [failureCallback] callback that will be invoked in case of an error: <pre>failureCallback(error)</pre>
*
* @see #initializeRecognition
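*
* @example
* //example (sketch): free recognition resources, re-initialize later before using ASR again
* mmir.MediaManager.destroyRecognition(function(didDestroy){
* console.log('released recognition resources: ' + didDestroy);
* });
* //... later, before using ASR again:
* mmir.MediaManager.initializeRecognition();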
*/
destroyRecognition: function(successCallback,failureCallback){
var funcName = 'destroyRecognition';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else {
if(logger.isDebug()) {
logger.debug("Audio Input: destroying the speech recognition instance is not supported.");
}
if(successCallback){
setTimeout(function() { successCallback(false); }, 0);
}
}
},
/**
* Re-initialize the speech recognition instance:
* should be called after invoking <code>destroyRecognition()</code> (and its success-callback returned <code>true</code>)
* before continuing to use the recognition instance.
*
* NOTE: may not be supported by all recognition implementations.
*
* NOTE: If it is not supported, <code>successCallback(false)</code> is triggered.
*
* IMPORTANT: plugins that support initializeRecognition() should also support destroyRecognition().
*
* @public
* @memberOf mmir.MediaManager#
*
* @param {Function} [successCallback] callback in case of success: <pre>successCallback(didInitialize: boolean)</pre>
* in case the plugin does not support initializeRecognition(),
* <code>successCallback(false)</code> will be invoked
* @param {Function} [failureCallback] callback that will be invoked in case of an error: <pre>failureCallback(error)</pre>
*
* @see #destroyRecognition
*/
initializeRecognition: function(successCallback,failureCallback){
var funcName = 'initializeRecognition';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else {
if(logger.isDebug()) {
logger.debug("Audio Input: re-initializing the speech recognition instance is not supported.");
}
if(successCallback){
setTimeout(function() { successCallback(false); }, 0);
}
}
},
///////////////////////////// audio output API: /////////////////////////////
/**
* Play PCM audio data.
*
* @param {Blob} blob
* @param {Function} [onPlayedCallback] OPTIONAL
* @param {Function} [failureCallback] OPTIONAL
*
* @memberOf mmir.MediaManager#
*/
playWAV: function(blob, onPlayedCallback, failureCallback){
var funcName = 'playWAV';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Output: play WAV audio is not supported.");
}
else {
logger.error("Audio Output: play WAV audio is not supported.");
}
},
/**
* Play audio file from the specified URL.
*
*
* @param {String} url
* @param {Function} [onPlayedCallback] OPTIONAL
* @param {Function} [failureCallback] OPTIONAL
*
* @memberOf mmir.MediaManager#
*/
playURL: function(url, onPlayedCallback, failureCallback){
var funcName = 'playURL';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Output: play audio from URL is not supported.");
}
else {
logger.error("Audio Output: play audio from URL is not supported.");
}
},
/**
* Play audio file from the specified URL or WAV data.
*
* Convenience function for {@link #playWAV} and {@link #playURL}:
* if first argument is a String, then <code>playURL</code> will be invoked,
* otherwise <code>playWAV</code>.
*
* @param {String | Blob} urlOrData
* @param {Function} [onPlayedCallback] OPTIONAL
* @param {Function} [failureCallback] OPTIONAL
*
* @memberOf mmir.MediaManager#
* @see #playWAV
* @see #playURL
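*
* @example
* //example (sketch; the URL is only illustrative): play an audio file by URL
* mmir.MediaManager.play('content/audio/beep.mp3', function(){
* console.log('finished playing');
* }, function(err){
* console.error(err);
* });
* //or play WAV data, e.g. a recorded Blob:
* //mmir.MediaManager.play(wavBlob);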
*/
play: function(urlOrData, onPlayedCallback, failureCallback){
if(typeof urlOrData === 'string'){
return this.playURL.apply(this, arguments);
} else {
return this.playWAV.apply(this, arguments);
}
},
/**
* Get an audio object for the audio file specified by URL.
*
* The audio object exports the following functions:
*
* <pre>
* play()
* stop()
* release()
* enable()
* disable()
* setVolume(number)
* getDuration()
* isPaused()
* isEnabled()
* </pre>
*
* NOTE: the audio object should only be used, after the <code>onLoadedCallback</code>
* was triggered.
*
* @param {String} url
* @param {Function} [onPlayedCallback] OPTIONAL
* @param {Function} [failureCallback] OPTIONAL
* @param {Function} [onLoadedCallback] OPTIONAL
*
* @returns {mmir.env.media.IAudio} the audio
*
* @see {mmir.env.media.IAudio#_constructor}
*
* @memberOf mmir.MediaManager#
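*
* @example
* //example (sketch; the URL is only illustrative): create an audio object and start playback when loaded
* var audio = mmir.MediaManager.getURLAsAudio('content/audio/greeting.mp3', function(){
* audio.release();//finished playing -> free resources
* }, function(err){
* console.error(err);
* }, function(){
* audio.play();//audio is loaded now -> start playback
* });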
*/
getURLAsAudio: function(url, onPlayedCallback, failureCallback, onLoadedCallback){
var funcName = 'getURLAsAudio';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Output: create audio from URL is not supported.");
}
else {
logger.error("Audio Output: create audio from URL is not supported.");
}
},
/**
* Get an audio object for the audio data specified by a Blob.
*
* NOTE: the audio object should only be used, after the <code>onLoadedCallback</code>
* was triggered.
*
* NOTE: if {@link mmir.env.media.IAudio#play IAudio.play} is used, before <code>onCreatedCallback</code> has been called,
* the audio will start playing, when audio becomes ready.
*
* NOTE: if {@link mmir.env.media.IAudio#release IAudio.release} is used, before <code>onCreatedCallback</code> has been called,
* the initialization will be prematurely aborted.
*
* @param {Blob} blob
* @param {Function} [onCreatedCallback] OPTIONAL callback for when audio data has been created
* @param {Function} [onPlayedCallback] OPTIONAL
* @param {Function} [failureCallback] OPTIONAL
* @param {Function} [onLoadedCallback] OPTIONAL
* @param {mmir.env.media.IAudio} [audioObj] OPTIONAL audio object to be used for loading the audio data:
* if specified, will also be returned
* (if omitted a new audio object will be created and returned)
*
* @returns {mmir.env.media.IAudio} the audio (is still "empty", will be "filled" when onCreatedCallback is called)
*
* @see {mmir.env.media.IAudio#_constructor}
*
* @memberOf mmir.MediaManager#
*/
getWAVAsAudio: function(blob, onCreatedCallback, onPlayedCallback, failureCallback, onLoadedCallback, audioObj){
var funcName = 'getWAVAsAudio';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Output: create audio from Blob is not supported.");
}
else {
logger.error("Audio Output: create audio from Blob is not supported.");
}
},
/**
* Get an audio object for the audio file specified by URL (string) or by WAV (blob) data.
*
* NOTE that getWAVAsAudio may not be supported by all modules!
*
* Convenience function for {@link #getURLAsAudio} and {@link #getWAVAsAudio}:
* if first argument is a String, then <code>getURLAsAudio</code> will be invoked,
* otherwise <code>getWAVAsAudio</code> (if the module supports this function).
*
* @param {String | Blob} urlOrData
* @param {Function} [onPlayedCallback] OPTIONAL
* @param {Function} [failureCallback] OPTIONAL
* @param {Function} [onLoadedCallback] OPTIONAL
*
* @memberOf mmir.MediaManager#
* @see #getURLAsAudio
* @see #getWAVAsAudio
*/
getAudio: function(urlOrData, onPlayedCallback, failureCallback, onLoadedCallback){
if(typeof urlOrData === 'string'){
return this.getURLAsAudio.apply(this, arguments);
} else {
return this.getWAVAsAudio.apply(this, arguments);
}
},
/**
* Get an empty audio object. This can be used as dummy or placeholder
* for a "real" audio object.
*
* The audio object exports the following functions:
*
* <pre>
* play()
* stop()
* release()
* enable()
* disable()
* setVolume(number)
* getDuration()
* isPaused()
* isEnabled()
* </pre>
*
* Note:
*
* <code>enable()</code> and <code>disable()</code> will set the internal
* enabled-state, which can be queried via <code>isEnabled()</code>.
*
* <code>play()</code> and <code>stop()</code> will set the internal
* playing-state, which can be queried via <code>isPaused()</code>
* (note however, that this empty audio does not actually play anything).
*
* <code>setVolume()</code> sets the internal volume-value.
*
* <code>getDuration()</code> will always return <code>0</code>.
*
*
* @returns {mmir.env.media.IAudio} the audio
*
* @see {mmir.env.media.IAudio#_constructor}
* @memberOf mmir.MediaManager#
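*
* @example
* //example (sketch): use an empty audio object as placeholder
* var placeholder = mmir.MediaManager.createEmptyAudio();
* placeholder.play();
* placeholder.isPaused();//-> false (state only, nothing is actually played)
* placeholder.getDuration();//-> always 0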
*/
createEmptyAudio: function(){
return {
_enabled: true,
_play: false,
_volume: 1,
play: function(){ this._play = true; return false;},
stop: function(){ this._play = false; return true;},
enable: function(){ this._enabled = true; },
disable: function(){ this._enabled = false; },
release: function(){ this._enabled = false; },
setVolume: function(vol){ this._volume = vol; },
getDuration: function(){ return 0; },
isPaused: function(){ return !this._play; },
isEnabled: function(){ return this._enabled; }
};
},
///////////////////////////// text-to-speech API: /////////////////////////////
/**
* Synthesizes ("read out loud") text.
*
* @param {String|Array<String>|PlainObject} [options] OPTIONAL
* if <code>String</code> or <code>Array</code> of <code>String</code>s
* synthesizes the text of the String(s).
* <br>For an Array: each entry is interpreted as "sentence";
* after each sentence, a short pause is inserted before synthesizing the
* next sentence<br>
* for a <code>PlainObject</code>, the following properties should be used:
* <pre>{
* text: String | String[], text that should be read aloud
* , pauseDuration: OPTIONAL Number, the length of the pauses between sentences (i.e. for String Arrays) in milliseconds
* , language: OPTIONAL String, the language for synthesis (if omitted, the current language setting is used)
* , voice: OPTIONAL String, the voice (language specific) for synthesis; NOTE that the specific available voices depend on the TTS engine
* , success: OPTIONAL Function, the on-playing-completed callback (see arg onPlayedCallback)
* , error: OPTIONAL Function, the error callback (see arg failureCallback)
* , ready: OPTIONAL Function, the audio-ready callback (see arg onReadyCallback)
* }</pre>
*
* @param {Function} [onPlayedCallback] OPTIONAL
* callback that is invoked when the audio of the speech synthesis finished playing:
* <pre>onPlayedCallback()</pre>
*
* <br>NOTE: if used in combination with <code>options.success</code>, this argument will supersede the options
*
* @param {Function} [failureCallback] OPTIONAL
* callback that is invoked in case an error occurred:
* <pre>failureCallback(error: String | Error)</pre>
*
* <br>NOTE: if used in combination with <code>options.error</code>, this argument will supersede the options
*
* @param {Function} [onReadyCallback] OPTIONAL
* callback that is invoked when audio becomes ready / is starting to play.
* If, after the first invocation, audio is paused due to preparing the next audio,
* then the callback will be invoked with <code>false</code>, and then with <code>true</code>
* (as first argument), when the audio becomes ready again, i.e. the callback signature is:
* <pre>onReadyCallback(isReady: Boolean, audio: IAudio)</pre>
*
* <br>NOTE: if used in combination with <code>options.ready</code>, this argument will supersede the options
*
* @memberOf mmir.MediaManager#
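*
* @example
* //example (sketch): read out two sentences with a short pause in between;
* //assumes a TTS plugin (e.g. audiotts) has been loaded
* mmir.MediaManager.tts({
* text: ['Hello world.', 'This is the second sentence.'],
* pauseDuration: 500,
* success: function(){ console.log('finished reading'); },
* error: function(err){ console.error(err); }
* });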
*/
tts: function(options, onPlayedCallback, failureCallback, onReadyCallback){
if(typeof options === 'function'){
onReadyCallback = failureCallback;
failureCallback = onPlayedCallback;
onPlayedCallback = options;
options = void(0);
}
var funcName = 'tts';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback || (failureCallback = (options && options.error))){
failureCallback("Audio Output: Text To Speech is not supported.");
}
else {
logger.error("Audio Output: Text To Speech is not supported.");
}
},
/**
* @deprecated use {@link #tts} instead
* @memberOf mmir.MediaManager#
*/
textToSpeech: function(parameter, onPlayedCallback, failureCallback){
var funcName = 'textToSpeech';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback || (failureCallback = (parameter && parameter.error))){
failureCallback("Audio Output: Text To Speech is not supported.");
}
else {
logger.error("Audio Output: Text To Speech is not supported.");
}
},
/**
* Cancel current synthesis.
*
* @memberOf mmir.MediaManager#
*/
cancelSpeech: function(successCallback,failureCallback){
var funcName = 'cancelSpeech';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Output: canceling Text To Speech is not supported.");
}
else {
logger.error("Audio Output: canceling Text To Speech is not supported.");
}
},
///////////////////////////// ADDITIONAL (optional) TTS functions: /////////////////////////////
/**
* get list of supported languages for TTS (may not be supported by all plugins).
*
* @memberOf mmir.MediaManager#
*/
getSpeechLanguages: function(successCallback,failureCallback){
var funcName = 'getSpeechLanguages';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){
return this.ctx[defaultExecId][funcName].apply(this, arguments);
}
else if(failureCallback){
failureCallback("Audio Output: retrieving list of available languages not supported.");
}
else {
logger.error("Audio Output: retrieving list of available languages not supported.");
}
},
/**
* get list of supported voices for TTS (may not be supported by all plugins).
*
* @param {String | VoiceOptions} [options] OPTIONAL if String, the language code (optionally with country code)
* for which the voices should be listed.
* if VoiceOptions:
* options.language: {String} OPTIONAL the language code
* options.details: {Boolean} OPTIONAL if TRUE the returned list contains
* VoiceDetail objects with
* {name: STRING, language: STRING, gender: "female" | "male" | "unknown"}
* @param {Function} successCallback the success callback: successCallback(Array<String | VoiceDetail>)
* @param {Function} failureCallback the error callback: failureCallback(err)
*
* @memberOf mmir.MediaManager#
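*
* @example
* //example (sketch): list detailed voice information for English;
* //assumes the loaded TTS plugin supports voice listing
* mmir.MediaManager.getVoices({language: 'en', details: true}, function(voiceList){
* voiceList.forEach(function(voice){
* console.log(voice.name + ' (' + voice.language + ', ' + voice.gender + ')');
* });
* }, function(err){ console.error(err); });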
*/
getVoices: function(options,successCallback,failureCallback){
var funcName = 'getVoices';
if(defaultExecId && typeof this.ctx[defaultExecId][funcName] !== 'undefined'){