@jxstjh/jhvideo
Version:
HTML5 jhvideo based on MPEG2-TS Stream Player
487 lines (446 loc) • 17.6 kB
text/typescript
import {callOnSvg, callOffSvg} from "./icons";
import {StreamOpt} from "../model/playerModel";
import {EventEmitter} from "events";
import PCMPlayer from 'pcm-player'
import {alaw, mulaw} from 'alawmulaw'
import httpClient from "../core/httpClient";
import PCMProcessor from './pcm-processor'
import AudioEncodeProcessor from './AudioEncodeProcessor.worklet.js'
import StreamWebsocket from "../stream/ws-loader";
import {ERRORMSG} from "./codemsg";
// Shape of the HTTP request used to resolve the talk (intercom) stream URL.
// Consumed by TalkCtrl.getUrl(), which issues `httpClient.get(url, {aisleId, protocol}, headers)`.
interface requestType {
// Endpoint that returns the playable websocket URL.
url: string,
// Channel / device aisle identifier, sent as a query parameter.
aisleId: string,
// Requested stream protocol, sent as a query parameter.
protocol: string
// Headers forwarded verbatim with the request.
headers: {
"Content-Type": string
"Authorization": string
}
}
// Status / error codes emitted through the controller's 'error' event.
const code = {
OK: 0,
ARGUMENTS: 1, // invalid arguments
UNSUPPORTED: 2, // not supported (e.g. microphone capture unavailable)
UNEXPECTED_STATUS: 3, // unexpected state
CONNECT: 4, // cannot establish the WebSocket connection: bad URL, closed port, or missing certificate
INTERRUPT: 5, // stream interrupted
}
// Per-codec fallback audio parameters. Used in open() when the stream's
// mediaInfo reports a value of 0 for one of these keys.
const audioDefaultConfig = {
// uncompressed PCM
'pcm': {
audioSample: 8000,
audioBitrate: 32000,
audioChannel: 1,
audioBitwidth: 16
},
// compressed, G.711 A-law (used in Europe)
'g711a': {
audioSample: 8000,
audioBitrate: 64000,
audioChannel: 1,
audioBitwidth: 16
},
// compressed, G.711 mu-law (used in North America and Japan)
'g711u': {
audioSample: 8000,
audioBitrate: 64000,
audioChannel: 1,
audioBitwidth: 16
},
}
/**
 * Two-way audio ("talk"/intercom) controller for the player.
 *
 * Builds a small draggable overlay inside the video container, captures
 * microphone audio (AudioWorklet when available, ScriptProcessorNode as a
 * fallback), encodes it and pushes it to the device over a websocket, while
 * G.711 audio coming back from the device is decoded (alawmulaw) and played
 * through PCMPlayer.
 */
class TalkCtrl {
  // True while a talk session is active (drives the start/stop button label).
  private autoTalk: boolean = false
  // Browser exposes navigator.mediaDevices.getUserMedia.
  _isCompatible: boolean = false
  // Plays the PCM samples received from the device.
  _pcmPlayer: PCMPlayer = null
  // Fallback PCM encoder used with ScriptProcessorNode.
  _processor: any = null
  // CSS class prefix used for all generated DOM nodes.
  _prefixName: string = ''
  // Root element of the talk overlay.
  _talkDemo: HTMLElement = null
  // Title shown in the overlay header.
  _talkTitle: string = null
  // Player container the overlay is appended to.
  _videoBox: Element = null
  // Body element of the overlay (hosts the status text / waveform canvas).
  _talkBody: HTMLElement = null
  // Resolved websocket URL of the talk stream.
  _playUrl: string = ''
  // Websocket wrapper carrying the duplex audio stream.
  _talkStream: any = null
  // G.711 decoder (alaw or mulaw), selected from the stream's audioType.
  _decoder: any = null
  // requestAnimationFrame id of the waveform animation (0 when idle).
  _animationId: number = 0
  // HTTP request info used by getUrl() to resolve the stream URL.
  requestInfo: requestType
  // Currently registered 'stream.output' listener. Tracked so repeated
  // open() calls do not stack duplicate listeners on the emitter.
  private _outputListener: any = null
  // Microphone capture graph.
  _capturer = {
    audioStream: null,
    audioContext: null,
    audioSource: null,
    audioNode: null,
    scriptProcessor: null,
    gainNode: null
  }
  // Negotiated audio parameters of the talk stream (updated in open()).
  _streamAudioInfo = {
    audioType: 'g711a',
    audioSample: 8000,
    audioBitrate: 64000,
    audioBitwidth: 16,
    audioChannel: 1,
  }
  // NOTE(review): currently unreferenced in this file; kept for compatibility.
  private eventList = {
    stopPropagation: (e) => {
      e.stopPropagation();
    }
  }
  emitter: EventEmitter
  /**
   * @param stream      stream options; only `title` is read here
   * @param requestInfo HTTP info used by getUrl() to resolve the stream URL
   * @param prefixName  CSS class prefix of the host player
   * @param videoBox    container element the overlay is attached to
   */
  constructor(stream?: StreamOpt, requestInfo?: requestType, prefixName: string = '', videoBox?: Element) {
    this.emitter = new EventEmitter()
    this.requestInfo = requestInfo
    // Robustness: `stream` is optional, so do not dereference it blindly.
    this._talkTitle = (stream && stream.title) || ''
    this._prefixName = prefixName
    this._videoBox = videoBox
    this._isCompatible = !!(window.navigator.mediaDevices && window.navigator.mediaDevices.getUserMedia)
  }
  // Subscribe to controller events ('error', 'success', 'stream.output').
  on(event: any, listener: any) {
    this.emitter.addListener(event, listener);
  }
  // Unsubscribe a previously registered listener.
  off(event: any, listener: any) {
    this.emitter.removeListener(event, listener);
  }
  // Build the talk overlay (header / body / footer) and start the session.
  async init() {
    // Browser cannot capture audio: surface a hint on the talk button and bail out.
    if (!this._isCompatible) {
      const className = '.jh-talk-btn'
      const callWrapper = this._videoBox.querySelector(className)
      // Guard: the button may not exist in every skin.
      callWrapper && callWrapper.setAttribute('aria-controls', '浏览器不支持对讲')
      return
    }
    // Overlay already exists — nothing to do.
    if (this._talkDemo) {
      return
    }
    // Create the overlay, anchored near the bottom-right of the video box.
    const talkDemo = document.createElement('div')
    talkDemo.classList.add(this._prefixName + '-talk-wrapper')
    this._videoBox.appendChild(talkDemo)
    talkDemo.style.top = (this._videoBox.clientHeight - 126) + 'px'
    talkDemo.style.left = (this._videoBox.clientWidth - 314) + 'px'
    const talkCard = document.createElement('div')
    talkCard.classList.add(`${this._prefixName}-talk-card`)
    // Header: call icon + stream title (also the drag handle).
    const talkHead = document.createElement('div')
    talkHead.classList.add(`${this._prefixName}-talk-head`)
    talkHead.innerHTML = `<h3 class="talk-title">
<span class="talk-svg">${callOffSvg}</span>
${this._talkTitle}
</h3>`
    // Body: status text, later replaced by the waveform canvas.
    const talkBody = document.createElement('div')
    talkBody.classList.add(`${this._prefixName}-talk-body`)
    talkBody.innerHTML = '数据请求中....'
    // Footer: start/stop toggle and close button.
    const talkFoot = document.createElement('div')
    talkFoot.classList.add(`${this._prefixName}-talk-foot`)
    talkFoot.innerHTML = `<button class="talk-start">停止</button>
<button class="talk-close">关闭</button>`
    talkCard.appendChild(talkHead)
    talkCard.appendChild(talkBody)
    talkCard.appendChild(talkFoot)
    talkDemo.appendChild(talkCard)
    this._talkBody = talkBody
    this._talkDemo = talkDemo
    this.addEvent(talkHead, talkFoot)
    this.open()
  }
  // Wire up drag-to-move on the header and the footer buttons.
  addEvent(talkHead: HTMLElement, talkFoot: HTMLElement) {
    let isHead = false, isBar = false
    let dy = 0, sy = 0, dx = 0, sx = 0
    const rec = this._talkDemo
    const videoBox = this._videoBox
    // Keep clicks on the overlay from reaching the player underneath.
    rec.addEventListener('click', (e) => {
      e.stopPropagation();
    })
    videoBox.addEventListener('mousemove', (e: any) => {
      e.stopPropagation();
      // Drag: move the overlay by the delta since mousedown.
      if (isHead) {
        rec.style.top = e.clientY - (dy - sy) + 'px';
        rec.style.left = e.clientX - (dx - sx) + 'px';
      }
    })
    videoBox.addEventListener('mouseup', (e) => {
      e.stopPropagation();
      if (isHead) {
        isHead = false;
      }
      if (isBar) {
        isBar = false
      }
    })
    talkHead.addEventListener('mousedown', (e) => {
      // Remember the pointer and the overlay's current position.
      dx = e.clientX;
      dy = e.clientY;
      sx = parseInt(rec.style.left, 10);
      sy = parseInt(rec.style.top, 10);
      if (!isHead) {
        isHead = true;
      }
    })
    // Toggle the session: stop while talking, (re)open otherwise.
    talkFoot.querySelector(`.talk-start`).addEventListener('click', () => {
      this.autoTalk ? this.stop(true) : this.open()
    })
    talkFoot.querySelector(`.talk-close`).addEventListener('click', () => {
      this.destroy(true)
    })
  }
  /**
   * Placeholder for symmetric listener removal. The handlers registered in
   * addEvent() are anonymous closures and cannot be detached individually;
   * the whole overlay is torn down in destroy() instead.
   * BUG FIX: the previous body queried `${prefix}-talk-head` without a
   * leading '.', got null back, and would have thrown if ever called.
   * Kept as a documented no-op for API compatibility.
   */
  removeEvent() {
    // intentionally empty
  }
  // Acquire the microphone and build the capture/encode graph.
  async _loadCapturer() {
    const constraints = {
      audio: {
        noiseSuppression: true, // noise suppression
        echoCancellation: true  // echo cancellation
      },
      video: false
    }
    try {
      // Ask for microphone permission.
      this._capturer.audioStream = await window.navigator.mediaDevices.getUserMedia(constraints)
      console.log('capturer.audioStream:', this._capturer.audioStream)
    } catch (err) {
      console.error(err)
      this.emitter.emit('error', code.UNSUPPORTED)
      // BUG FIX: without a microphone stream the graph below cannot be built;
      // previously execution fell through and createMediaStreamSource(null) threw.
      return
    }
    const audioContext = this._capturer.audioContext = new window.AudioContext()
    // Prefer AudioWorklet (processing runs off the main thread); fall back to
    // the deprecated ScriptProcessorNode on older browsers.
    if (window.AudioWorklet && window.AudioWorkletNode) {
      console.info('0:')
      // Modern path: AudioWorkletNode running AudioEncodeProcessor.
      await audioContext.audioWorklet.addModule(AudioEncodeProcessor as any)
      const audioSource = this._capturer.audioSource = audioContext.createMediaStreamSource(this._capturer.audioStream)
      let processorOptions = {
        fromSampleRate: audioContext.sampleRate,
        ...this._streamAudioInfo
      }
      let audioNode = this._capturer.audioNode = new AudioWorkletNode(audioContext, 'AudioEncodeProcessor', {processorOptions})
      audioNode.port.onmessage = (evt) => {
        // Encoded audio arrives from the worklet as an ArrayBuffer.
        const bFileWrite = false
        if (evt.data && bFileWrite && evt.data.type === 'pcm') {
          // this._root._fileStorage.inputData(evt.data.pcmSamples)
        } else if (evt.data && evt.data instanceof ArrayBuffer) {
          this.emitter.emit('stream.output', evt.data)
        } else {
          console.log('got message from worklet', evt.type, evt.data)
        }
      }
      audioSource.connect(audioNode)
      audioNode.connect(audioContext.destination)
      console.log('talk-success')
      this.emitter.emit('success')
    } else {
      // Legacy path: ScriptProcessorNode + PCMProcessor.
      this._loadProcessor()
      const audioSource = this._capturer.audioSource = audioContext.createMediaStreamSource(this._capturer.audioStream)
      let scriptProcessor = this._capturer.scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1)
      scriptProcessor.onaudioprocess = (evt) => {
        // Raw PCM could be recorded here; the processor resamples/encodes it.
        this._processor.input(evt.inputBuffer.getChannelData(0))
      }
      audioSource.connect(scriptProcessor)
      scriptProcessor.connect(audioContext.destination)
      this.emitter.emit('success')
    }
    // Start the waveform animation.
    this.audioAnimation()
  }
  // Draw a live waveform of the captured audio into the overlay body.
  async audioAnimation() {
    const audioCtx = this._capturer.audioContext
    const source = this._capturer.audioSource;
    // Tap the capture source with an AnalyserNode.
    const analyser = audioCtx.createAnalyser();
    analyser.fftSize = 512;
    source.connect(analyser);
    // Canvas sized to the overlay body.
    const talkBody = this._talkBody
    const canvas = document.createElement("canvas");
    const ctx = canvas.getContext('2d');
    canvas.width = talkBody.clientWidth;
    canvas.height = talkBody.clientHeight;
    talkBody.appendChild(canvas)
    const draw = () => {
      // Time-domain samples, one byte per bin.
      const dataArray = new Uint8Array(analyser.frequencyBinCount);
      analyser.getByteTimeDomainData(dataArray);
      ctx.clearRect(0, 0, canvas.width, canvas.height);
      ctx.beginPath();
      for (let i = 0; i < dataArray.length; i++) {
        const v = dataArray[i] / 96.0;
        const y = v * canvas.height / 2;
        if (i === 0) {
          ctx.moveTo(i, y);
        } else {
          ctx.lineTo(i, y);
        }
      }
      ctx.lineTo(canvas.width, canvas.height / 2);
      ctx.strokeStyle = '#1aff8c';
      ctx.stroke();
      // Schedule the next frame; the id lets stop() cancel the loop.
      this._animationId = requestAnimationFrame(draw);
    };
    draw();
  }
  // Sync the button label and header icon with the session state.
  _loadProcessing() {
    const {_talkDemo, autoTalk} = this
    const start = _talkDemo.querySelector(`.talk-start`)
    const svg = _talkDemo.querySelector(`.talk-svg`)
    if (autoTalk) {
      start.innerHTML = '停止'
      svg.innerHTML = callOnSvg
    } else {
      start.innerHTML = '开始'
      svg.innerHTML = callOffSvg
    }
  }
  /**
   * Legacy-path PCM pipeline: resamples/encodes microphone samples and
   * re-emits them as 'stream.output'.
   */
  _loadProcessor() {
    this._processor = new PCMProcessor({
      fromSampleRate: this._capturer.audioContext.sampleRate,
      ...this._streamAudioInfo
    }, (samples) => {
      this.emitter.emit('stream.output', samples)
    })
  }
  // Pick the G.711 decoder and create the PCM playback sink.
  _loadPlayer() {
    let {audioType, audioSample, audioChannel} = this._streamAudioInfo
    switch (audioType) {
      case 'g711a':
        this._decoder = alaw
        break
      case 'g711u':
        this._decoder = mulaw
        break
      default: break
    }
    const obj: any = {
      inputCodec: 'Int16',
      channels: audioChannel,
      sampleRate: audioSample,
      flushTime: 500
    }
    this._pcmPlayer = new PCMPlayer(obj)
  }
  // Resolve the stream URL, connect the websocket, negotiate audio
  // parameters, then start playback and capture.
  async open() {
    this._playUrl = await this.getUrl()
    this._talkStream = new StreamWebsocket(this._playUrl, 'talk')
    // Device -> browser: decode G.711 and feed the PCM player.
    this._talkStream.on('stream.input', (inputBuf: any) => {
      if (this._pcmPlayer) {
        let pcmSamples = this._decoder.decode(new Uint8Array(inputBuf))
        this._pcmPlayer.feed(pcmSamples)
      }
    })
    // Browser -> device: forward encoded microphone data.
    // BUG FIX: track a single listener instead of adding a new one on every
    // open(); previously each restart stacked another listener and packets
    // were sent multiple times.
    if (this._outputListener) {
      this.emitter.removeListener('stream.output', this._outputListener)
    }
    this._outputListener = (outputBuf: any) => {
      this._talkStream && this._talkStream.sendArrayBuffer(outputBuf)
    }
    this.emitter.on('stream.output', this._outputListener)
    this._talkStream.open().then(({code, mediaInfo}) => {
      // mediaInfo is a query string, e.g. "audioType=G711A&audioSample=8000".
      let searchParams: any = new URLSearchParams(mediaInfo)
      let audioType = this._streamAudioInfo['audioType']
      if (searchParams.has('audioType')) {
        audioType = this._streamAudioInfo['audioType'] = searchParams.get('audioType').toLocaleLowerCase()
      }
      for (let key of searchParams.keys()) {
        if (key === 'audioType')
          continue
        let value = searchParams.get(key)
        this._streamAudioInfo[key] = /^\d+$/.test(value) ? parseInt(value) : value
        // A reported 0 means "unknown": fall back to the codec defaults.
        if (this._streamAudioInfo[key] === 0) {
          this._streamAudioInfo[key] = audioDefaultConfig[audioType][key]
        }
      }
      this.autoTalk = true
      this._loadPlayer()
      this._loadCapturer()
      this._loadProcessing()
    }).catch((errCode: any) => {
      this.stop(true)
      this._talkBody.innerHTML = ERRORMSG[errCode]
      console.error(errCode)
    })
  }
  /**
   * Resolve the talk stream URL from the backend.
   * BUG FIX: the branches were inverted — data was returned on non-200 and
   * successful responses were rejected. Now 200 resolves with `res.data`
   * and anything else rejects with `res.msg`.
   */
  async getUrl() {
    const {url, aisleId, protocol, headers} = this.requestInfo
    const res = await httpClient.get(url, {aisleId, protocol}, headers)
    if (res.code === 200) {
      return res.data
    }
    return Promise.reject(res.msg)
  }
  /**
   * Stop the talk session and tear down the capture graph.
   * @param en when true, also reset the UI (button label, waveform canvas)
   */
  stop(en = false) {
    this.autoTalk = false
    if (en) {
      this._loadProcessing()
    }
    if (this._talkStream) {
      this._talkStream.destroy()
      this._talkStream = null
    }
    // Detach the outgoing-audio listener registered in open().
    if (this._outputListener) {
      this.emitter.removeListener('stream.output', this._outputListener)
      this._outputListener = null
    }
    if (this._capturer.audioNode) {
      this._capturer.audioSource.disconnect(this._capturer.audioNode);
      this._capturer.audioNode.disconnect(this._capturer.audioContext.destination);
    }
    if (this._capturer.scriptProcessor) {
      this._capturer.audioSource.disconnect(this._capturer.scriptProcessor)
      this._capturer.scriptProcessor.disconnect(this._capturer.audioContext.destination)
    }
    this._capturer.audioContext && this._capturer.audioContext.close()
    // Release the microphone (turns off the browser's recording indicator).
    this._capturer.audioStream && this._capturer.audioStream.getTracks().forEach(track => track.stop())
    Object.keys(this._capturer).forEach(key => {
      this._capturer[key] = null
    })
    if (this._animationId) {
      cancelAnimationFrame(this._animationId);
      // BUG FIX: reset the id so a later session starts a fresh animation loop.
      this._animationId = 0
    }
    if (this._talkBody && en) {
      const canvas = this._talkBody.querySelector('canvas')
      canvas && canvas.parentNode.removeChild(canvas)
    }
  }
  /**
   * Full teardown: stop the session, drop all listeners, destroy the
   * player and remove the overlay from the DOM.
   */
  destroy(en = false) {
    this.emitter && this.emitter.removeAllListeners()
    this.stop(en)
    if (this._pcmPlayer) {
      this._pcmPlayer.destroy()
      this._pcmPlayer = null
    }
    this._talkDemo && this._talkDemo.parentNode.removeChild(this._talkDemo)
    this._talkDemo = null
  }
  /**
   * Whether the current browser supports two-way talk.
   * BUG FIX: the check was inverted and returned `true` exactly when
   * getUserMedia was missing (compare `_isCompatible` in the constructor).
   */
  static talkIsSupported() {
    return !!(window.navigator.mediaDevices && window.navigator.mediaDevices.getUserMedia)
  }
}
export default TalkCtrl