@onereach/step-voice
Version:
Onereach.ai Voice Steps
66 lines (65 loc) • 2.56 kB
JavaScript
;
/* eslint-disable @typescript-eslint/strict-boolean-expressions, @typescript-eslint/explicit-function-return-type */
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
const lodash_1 = tslib_1.__importDefault(require("lodash"));
const voice_1 = tslib_1.__importDefault(require("./voice"));
/**
 * Voice step that plays a prompt (TTS text and/or pre-recorded audio URLs)
 * to the caller.
 *
 * `runStep` builds one speech section per configured audio entry, registers
 * a local trigger for in-call events on `in/voice/<callId>` (hangup,
 * playback complete, error, cancel), and otherwise issues a `speak` command —
 * logging a transcript event first and pausing/resuming any active recording
 * around the prompt when it is marked as sensitive data.
 */
class SayMessage extends voice_1.default {
    async runStep() {
        const { audio, textType, tts, sensitiveData } = this.data;
        const call = await this.fetchData();
        const ttsSettings = tts.getSettings(call.tts);
        // One section per configured audio entry. Barge-in (voice and keypad)
        // is disabled for a plain "say" prompt. `ttsSettings` is spread last
        // so it supplies the provider/voice options (the original code also
        // set `provider` explicitly, but the spread immediately shadowed it —
        // that dead property has been removed).
        const speechSections = lodash_1.default.map(audio, (section) => ({
            text: section.voiceTextMsg,
            url: section.audioUrl,
            bargeInVoice: false,
            bargeInKeypad: false,
            textType,
            ...ttsSettings
        }));
        // React to in-call events for this call while the prompt is active.
        this.triggers.local(`in/voice/${call.id}`, async (event) => {
            // Record any caller interruption against the transcript before
            // dispatching on the event type.
            await this.handleInterruption({
                call,
                event,
                speechSections,
                reportingSettingsKey: 'transcript'
            });
            switch (event.params.type) {
                case 'hangup':
                    // Caller hung up: clean up and wait for conversation end.
                    await this.handleHangup(call);
                    return await this.waitConvEnd();
                case 'playback':
                    // Prompt finished playing: restore recording state
                    // (it was paused for sensitive prompts) and advance.
                    await this.resumeRecording(call, sensitiveData);
                    return this.exitStep('next');
                case 'error':
                    return this.throwError(event.params.error);
                case 'cancel': {
                    return this.handleCancel();
                }
                default:
                    // Unrecognized event type: leave the flow entirely.
                    return this.exitFlow();
            }
        });
        // No matching trigger fired yet: issue the speak command.
        this.triggers.otherwise(async () => {
            const command = {
                name: 'speak',
                params: {
                    sections: speechSections,
                    // Filled in below once the transcript event is created.
                    reporterTranscriptEventId: '',
                    useWhisperFeature: true
                }
            };
            // Log the prompt to the transcript first so the speak command
            // can carry the resulting event id for reporting.
            const eventId = await this.transcript(call, {
                action: 'Call Prompt',
                sections: command.params.sections,
                reportingSettingsKey: 'transcript',
                actionFromBot: true
            });
            command.params.reporterTranscriptEventId = eventId;
            // Pauses recording around the prompt when `sensitiveData` is set,
            // then sends the command; recording resumes on 'playback' above.
            await this.pauseRecording(call, command, sensitiveData);
            return this.exitFlow();
        });
    }
}
exports.default = SayMessage;