videojs-contrib-hls
Play back HLS with video.js, even where it's not natively supported.
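A minimal setup sketch (assuming video.js and this plugin are already loaded on a page with a video element whose id is "example-video"; the manifest URL is illustrative):

  var player = videojs('example-video');
  player.src({
    src: 'https://example.com/index.m3u8',
    type: 'application/x-mpegURL'
  });
  player.play();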
/*! videojs-contrib-hls - v1.3.10 - 2016-03-01
* Copyright (c) 2016 Brightcove; Licensed */
/*! videojs-contrib-media-sources - v2.4.6 - 2016-03-01
* Copyright (c) 2016 Brightcove; Licensed */
(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.muxjs = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
/**
* mux.js
*
* Copyright (c) 2016 Brightcove
* All rights reserved.
*
* A stream-based aac to mp4 converter. This utility can be used to
* deliver mp4s to a SourceBuffer on platforms that support native
* Media Source Extensions.
*/
'use strict';
var Stream = require('../utils/stream.js');
// Constants
var AacStream;
/**
* Splits an incoming stream of binary data into ADTS and ID3 Frames.
*/
AacStream = function() {
var
everything,
receivedTimeStamp = false,
timeStamp = 0;
AacStream.prototype.init.call(this);
this.setTimestamp = function (timestamp) {
timeStamp = timestamp;
};
this.parseId3TagSize = function(header, byteIndex) {
var
returnSize = (header[byteIndex + 6] << 21) |
(header[byteIndex + 7] << 14) |
(header[byteIndex + 8] << 7) |
(header[byteIndex + 9]),
flags = header[byteIndex + 5],
footerPresent = (flags & 16) >> 4;
if (footerPresent) {
return returnSize + 20;
}
return returnSize + 10;
};
this.parseAdtsSize = function(header, byteIndex) {
var
lowThree = (header[byteIndex + 5] & 0xE0) >> 5,
middle = header[byteIndex + 4] << 3,
// the low two bits of byte 3 are the high bits of the 13-bit frame
// length, so mask them before shifting
highTwo = (header[byteIndex + 3] & 0x3) << 11;
return (highTwo | middle) | lowThree;
};
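// Worked example of the two size computations above, using illustrative
// header bytes: if ADTS header bytes 3..5 are 0x01 0x20 0x40, the frame
// size is ((0x01 & 0x3) << 11) | (0x20 << 3) | ((0x40 & 0xE0) >> 5) =
// 2048 + 256 + 2 = 2306 bytes. ID3 tag sizes are synchsafe integers
// (7 bits per byte), so size bytes 0x00 0x00 0x02 0x01 decode to
// (2 << 7) | 1 = 257 payload bytes, plus 10 for the header (20 when a
// footer is present).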
this.push = function(bytes) {
var
frameSize = 0,
byteIndex = 0,
chunk,
packet,
tempLength,
remainder;
// If there are bytes remaining from the last segment, prepend them to the
// bytes that were pushed in
if (everything !== undefined && everything.length) {
tempLength = everything.length;
// hold a reference to the previously buffered bytes; assigning the new
// Uint8Array to `everything` first would discard them before the copy
remainder = everything;
everything = new Uint8Array(bytes.byteLength + tempLength);
everything.set(remainder.subarray(0, tempLength));
everything.set(bytes, tempLength);
} else {
everything = bytes;
}
while (everything.length - byteIndex >= 10) {
if ((everything[byteIndex] === 'I'.charCodeAt(0)) &&
(everything[byteIndex + 1] === 'D'.charCodeAt(0)) &&
(everything[byteIndex + 2] === '3'.charCodeAt(0))) {
// check framesize
frameSize = this.parseId3TagSize(everything, byteIndex);
// only emit a full packet once the entire frame is in the buffer
if (byteIndex + frameSize > everything.length) {
break;
}
chunk = {
type: 'timed-metadata',
data: everything.subarray(byteIndex, byteIndex + frameSize)
};
this.trigger('data', chunk);
byteIndex += frameSize;
continue;
} else if (((everything[byteIndex] & 0xff) === 0xff) &&
((everything[byteIndex + 1] & 0xf0) === 0xf0)) {
// an ADTS frame begins with a 12-bit syncword of all ones; the
// parentheses around the mask are required because `===` binds more
// tightly than `&`
frameSize = this.parseAdtsSize(everything, byteIndex);
if (byteIndex + frameSize > everything.length) {
break;
}
packet = {
type: 'audio',
data: everything.subarray(byteIndex, byteIndex + frameSize),
pts: timeStamp,
dts: timeStamp,
};
this.trigger('data', packet);
byteIndex += frameSize;
continue;
}
byteIndex++;
}
};
};
AacStream.prototype = new Stream();
module.exports = AacStream;
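// A minimal usage sketch (the input bytes here are hypothetical): push
// raw AAC/ID3 data in and listen for the parsed frames. `on` comes from
// the Stream base class required above.
//
//   var aac = new AacStream();
//   aac.on('data', function(frame) {
//     // frame.type is 'audio' or 'timed-metadata'
//     console.log(frame.type, frame.data.byteLength);
//   });
//   aac.setTimestamp(0);
//   aac.push(rawBytes); // rawBytes: a Uint8Array of ADTS/ID3 data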
},{"../utils/stream.js":20}],2:[function(require,module,exports){
'use strict';
var Stream = require('../utils/stream.js');
var AdtsStream;
var
ADTS_SAMPLING_FREQUENCIES = [
96000,
88200,
64000,
48000,
44100,
32000,
24000,
22050,
16000,
12000,
11025,
8000,
7350
];
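// For example, samplingfrequencyindex 4 selects 44100 Hz, so a
// 1024-sample AAC frame spans (1024 * 90000) / 44100, roughly 2090
// ticks of the 90kHz MPEG-2 clock (about 23.2 milliseconds).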
/*
* Accepts an ElementaryStream and emits data events with parsed
* AAC audio frames from the individual packets. Input audio in ADTS
* format is unpacked and re-emitted as AAC frames.
*
* @see http://wiki.multimedia.cx/index.php?title=ADTS
* @see http://wiki.multimedia.cx/?title=Understanding_AAC
*/
AdtsStream = function() {
var self, buffer;
AdtsStream.prototype.init.call(this);
self = this;
this.push = function(packet) {
var
i = 0,
frameNum = 0,
frameLength,
protectionSkipBytes,
frameEnd,
oldBuffer,
numFrames,
sampleCount,
adtsFrameDuration;
if (packet.type !== 'audio') {
// ignore non-audio data
return;
}
// Prepend any data in the buffer to the input data so that we can parse
// aac frames that cross a PES packet boundary
if (buffer) {
oldBuffer = buffer;
buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);
buffer.set(oldBuffer);
buffer.set(packet.data, oldBuffer.byteLength);
} else {
buffer = packet.data;
}
// unpack any ADTS frames which have been fully received
// for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS
while (i + 5 < buffer.length) {
// Look for the start of an ADTS header
if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {
// If a valid header was not found, jump one forward and attempt to
// find a valid ADTS header starting at the next byte
i++;
continue;
}
// The protection skip bit tells us if we have 2 bytes of CRC data at the
// end of the ADTS header
protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2;
// Frame length is a 13 bit integer starting 16 bits from the
// end of the sync sequence
frameLength = ((buffer[i + 3] & 0x03) << 11) |
(buffer[i + 4] << 3) |
((buffer[i + 5] & 0xe0) >> 5);
sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;
adtsFrameDuration = (sampleCount * 90000) /
ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2];
frameEnd = i + frameLength;
// If we don't have enough data to actually finish this ADTS frame, return
// and wait for more data
if (buffer.byteLength < frameEnd) {
return;
}
// Otherwise, deliver the complete AAC frame
this.trigger('data', {
pts: packet.pts + (frameNum * adtsFrameDuration),
dts: packet.dts + (frameNum * adtsFrameDuration),
sampleCount: sampleCount,
audioobjecttype: ((buffer[i + 2] >>> 6) & 0x03) + 1,
channelcount: ((buffer[i + 2] & 1) << 3) |
((buffer[i + 3] & 0xc0) >>> 6),
samplerate: ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2],
samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,
// assume ISO/IEC 14496-12 AudioSampleEntry default of 16
samplesize: 16,
data: buffer.subarray(i + 7 + protectionSkipBytes, frameEnd)
});
// If the buffer is empty, clear it and return
if (buffer.byteLength === frameEnd) {
buffer = undefined;
return;
}
frameNum++;
// Remove the finished frame from the buffer and start the process again
buffer = buffer.subarray(frameEnd);
}
};
this.flush = function() {
this.trigger('done');
};
};
AdtsStream.prototype = new Stream();
module.exports = AdtsStream;
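// Usage sketch (hypothetical input): AdtsStream consumes the 'audio'
// packets an ElementaryStream emits and re-emits unpacked AAC frames.
//
//   var adts = new AdtsStream();
//   adts.on('data', function(frame) {
//     console.log(frame.samplerate, frame.channelcount, frame.pts);
//   });
//   adts.push({ type: 'audio', data: adtsBytes, pts: 0, dts: 0 });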
},{"../utils/stream.js":20}],3:[function(require,module,exports){
'use strict';
var Stream = require('../utils/stream.js');
var ExpGolomb = require('../utils/exp-golomb.js');
var H264Stream, NalByteStream;
/**
* Accepts a NAL unit byte stream and unpacks the embedded NAL units.
*/
NalByteStream = function() {
var
syncPoint = 0,
i,
buffer;
NalByteStream.prototype.init.call(this);
this.push = function(data) {
var swapBuffer;
if (!buffer) {
buffer = data.data;
} else {
swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);
swapBuffer.set(buffer);
swapBuffer.set(data.data, buffer.byteLength);
buffer = swapBuffer;
}
// Rec. ITU-T H.264, Annex B
// scan for NAL unit boundaries
// a match looks like this:
// 0 0 1 .. NAL .. 0 0 1
// ^ sync point ^ i
// or this:
// 0 0 1 .. NAL .. 0 0 0
// ^ sync point ^ i
// advance the sync point to a NAL start, if necessary
for (; syncPoint < buffer.byteLength - 3; syncPoint++) {
if (buffer[syncPoint + 2] === 1) {
// the sync point is properly aligned
i = syncPoint + 5;
break;
}
}
while (i < buffer.byteLength) {
// look at the current byte to determine if we've hit the end of
// a NAL unit boundary
switch (buffer[i]) {
case 0:
// skip past non-sync sequences
if (buffer[i - 1] !== 0) {
i += 2;
break;
} else if (buffer[i - 2] !== 0) {
i++;
break;
}
// deliver the NAL unit
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
// drop trailing zeroes
do {
i++;
} while (buffer[i] !== 1 && i < buffer.length);
syncPoint = i - 2;
i += 3;
break;
case 1:
// skip past non-sync sequences
if (buffer[i - 1] !== 0 ||
buffer[i - 2] !== 0) {
i += 3;
break;
}
// deliver the NAL unit
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
syncPoint = i - 2;
i += 3;
break;
default:
// the current byte isn't a one or zero, so it cannot be part
// of a sync sequence
i += 3;
break;
}
}
// filter out the NAL units that were delivered
buffer = buffer.subarray(syncPoint);
i -= syncPoint;
syncPoint = 0;
};
this.flush = function() {
// deliver the last buffered NAL unit
if (buffer && buffer.byteLength > 3) {
this.trigger('data', buffer.subarray(syncPoint + 3));
}
// reset the stream state
buffer = null;
syncPoint = 0;
this.trigger('done');
};
};
NalByteStream.prototype = new Stream();
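// Worked example of the Annex B scan above: given the byte stream
//   00 00 01 09 F0 00 00 01 67 ...
// the first start code (00 00 01) begins at offset 0 and the next at
// offset 5, so the bytes between them (09 F0, an access unit delimiter)
// are emitted as a single 'data' event.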
/**
* Accepts input from a ElementaryStream and produces H.264 NAL unit data
* events.
*/
H264Stream = function() {
var
nalByteStream = new NalByteStream(),
self,
trackId,
currentPts,
currentDts,
discardEmulationPreventionBytes,
readSequenceParameterSet,
skipScalingList;
H264Stream.prototype.init.call(this);
self = this;
this.push = function(packet) {
if (packet.type !== 'video') {
return;
}
trackId = packet.trackId;
currentPts = packet.pts;
currentDts = packet.dts;
nalByteStream.push(packet);
};
nalByteStream.on('data', function(data) {
var
event = {
trackId: trackId,
pts: currentPts,
dts: currentDts,
data: data
};
switch (data[0] & 0x1f) {
case 0x05:
event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';
break;
case 0x06:
event.nalUnitType = 'sei_rbsp';
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
break;
case 0x07:
event.nalUnitType = 'seq_parameter_set_rbsp';
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
event.config = readSequenceParameterSet(event.escapedRBSP);
break;
case 0x08:
event.nalUnitType = 'pic_parameter_set_rbsp';
break;
case 0x09:
event.nalUnitType = 'access_unit_delimiter_rbsp';
break;
default:
break;
}
self.trigger('data', event);
});
nalByteStream.on('done', function() {
self.trigger('done');
});
this.flush = function() {
nalByteStream.flush();
};
/**
* Advance the ExpGolomb decoder past a scaling list. The scaling
* list is optionally transmitted as part of a sequence parameter
* set and is not relevant to transmuxing.
* @param count {number} the number of entries in this scaling list
* @param expGolombDecoder {object} an ExpGolomb decoder positioned at
* the start of a scaling list
* @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1
*/
skipScalingList = function(count, expGolombDecoder) {
var
lastScale = 8,
nextScale = 8,
j,
deltaScale;
for (j = 0; j < count; j++) {
if (nextScale !== 0) {
deltaScale = expGolombDecoder.readExpGolomb();
nextScale = (lastScale + deltaScale + 256) % 256;
}
lastScale = (nextScale === 0) ? lastScale : nextScale;
}
};
/**
* Expunge any "Emulation Prevention" bytes from a "Raw Byte
* Sequence Payload"
* @param data {Uint8Array} the bytes of a RBSP from a NAL
* unit
* @return {Uint8Array} the RBSP without any Emulation
* Prevention Bytes
*/
discardEmulationPreventionBytes = function(data) {
var
length = data.byteLength,
emulationPreventionBytesPositions = [],
i = 1,
newLength, newData;
// Find all `Emulation Prevention Bytes`
while (i < length - 2) {
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
emulationPreventionBytesPositions.push(i + 2);
i += 2;
} else {
i++;
}
}
// If no Emulation Prevention Bytes were found just return the original
// array
if (emulationPreventionBytesPositions.length === 0) {
return data;
}
// Create a new array to hold the NAL unit data
newLength = length - emulationPreventionBytesPositions.length;
newData = new Uint8Array(newLength);
var sourceIndex = 0;
for (i = 0; i < newLength; sourceIndex++, i++) {
if (sourceIndex === emulationPreventionBytesPositions[0]) {
// Skip this byte
sourceIndex++;
// Remove this position index
emulationPreventionBytesPositions.shift();
}
newData[i] = data[sourceIndex];
}
return newData;
};
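// For example, the RBSP bytes [0x10, 0x00, 0x00, 0x03, 0x01] contain one
// emulation prevention byte (the 0x03 at index 3), so the function above
// returns [0x10, 0x00, 0x00, 0x01].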
/**
* Read a sequence parameter set and return some interesting video
* properties. A sequence parameter set is the H264 metadata that
* describes the properties of upcoming video frames.
* @param data {Uint8Array} the bytes of a sequence parameter set
* @return {object} an object with configuration parsed from the
* sequence parameter set, including the dimensions of the
* associated video frames.
*/
readSequenceParameterSet = function(data) {
var
frameCropLeftOffset = 0,
frameCropRightOffset = 0,
frameCropTopOffset = 0,
frameCropBottomOffset = 0,
expGolombDecoder, profileIdc, levelIdc, profileCompatibility,
chromaFormatIdc, picOrderCntType,
numRefFramesInPicOrderCntCycle, picWidthInMbsMinus1,
picHeightInMapUnitsMinus1,
frameMbsOnlyFlag,
scalingListCount,
i;
expGolombDecoder = new ExpGolomb(data);
profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc
profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag
levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8)
expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id
// some profiles have more optional data we don't need
if (profileIdc === 100 ||
profileIdc === 110 ||
profileIdc === 122 ||
profileIdc === 244 ||
profileIdc === 44 ||
profileIdc === 83 ||
profileIdc === 86 ||
profileIdc === 118 ||
profileIdc === 128 ||
profileIdc === 138 ||
profileIdc === 139 ||
profileIdc === 134) {
chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb();
if (chromaFormatIdc === 3) {
expGolombDecoder.skipBits(1); // separate_colour_plane_flag
}
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8
expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag
if (expGolombDecoder.readBoolean()) { // seq_scaling_matrix_present_flag
scalingListCount = (chromaFormatIdc !== 3) ? 8 : 12;
for (i = 0; i < scalingListCount; i++) {
if (expGolombDecoder.readBoolean()) { // seq_scaling_list_present_flag[ i ]
if (i < 6) {
skipScalingList(16, expGolombDecoder);
} else {
skipScalingList(64, expGolombDecoder);
}
}
}
}
}
expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4
picOrderCntType = expGolombDecoder.readUnsignedExpGolomb();
if (picOrderCntType === 0) {
expGolombDecoder.readUnsignedExpGolomb(); //log2_max_pic_order_cnt_lsb_minus4
} else if (picOrderCntType === 1) {
expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag
expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic
expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field
numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb();
for(i = 0; i < numRefFramesInPicOrderCntCycle; i++) {
expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ]
}
}
expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames
expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag
picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb();
picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb();
frameMbsOnlyFlag = expGolombDecoder.readBits(1);
if (frameMbsOnlyFlag === 0) {
expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag
}
expGolombDecoder.skipBits(1); // direct_8x8_inference_flag
if (expGolombDecoder.readBoolean()) { // frame_cropping_flag
frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb();
frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb();
frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb();
frameCropBottomOffset = expGolombDecoder.readUnsignedExpGolomb();
}
return {
profileIdc: profileIdc,
levelIdc: levelIdc,
profileCompatibility: profileCompatibility,
width: ((picWidthInMbsMinus1 + 1) * 16) - frameCropLeftOffset * 2 - frameCropRightOffset * 2,
height: ((2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16) - (frameCropTopOffset * 2) - (frameCropBottomOffset * 2)
};
};
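// Worked example of the dimension math above: a typical 1080p SPS has
// picWidthInMbsMinus1 = 119, picHeightInMapUnitsMinus1 = 67,
// frameMbsOnlyFlag = 1 and frameCropBottomOffset = 4, giving
// width = (119 + 1) * 16 = 1920 and
// height = (2 - 1) * (67 + 1) * 16 - 4 * 2 = 1080.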
};
H264Stream.prototype = new Stream();
module.exports = {
H264Stream: H264Stream,
NalByteStream: NalByteStream,
};
},{"../utils/exp-golomb.js":19,"../utils/stream.js":20}],4:[function(require,module,exports){
module.exports = {
adts: require('./adts'),
h264: require('./h264'),
};
},{"./adts":2,"./h264":3}],5:[function(require,module,exports){
/**
* An object that stores the bytes of an FLV tag and methods for
* querying and manipulating that data.
* @see http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf
*/
'use strict';
var FlvTag;
// (type:uint, extraData:Boolean = false) extends ByteArray
FlvTag = function(type, extraData) {
var
// Counter if this is a metadata tag, nal start marker if this is a
// video tag. Unused if this is an audio tag.
adHoc = 0, // :uint
// The default size is 16kB, but that is not enough to hold iframe
// data and the resizing algorithm costs a bit, so we create a larger
// starting buffer for video tags
bufferStartSize = 16384,
// checks whether the FLV tag has enough capacity to accept the proposed
// write and re-allocates the internal buffers if necessary
prepareWrite = function(flv, count) {
var
bytes,
minLength = flv.position + count;
if (minLength < flv.bytes.byteLength) {
// there's enough capacity so do nothing
return;
}
// allocate a new buffer and copy over the data that will not be modified
bytes = new Uint8Array(minLength * 2);
bytes.set(flv.bytes.subarray(0, flv.position), 0);
flv.bytes = bytes;
flv.view = new DataView(flv.bytes.buffer);
},
// commonly used metadata properties
widthBytes = FlvTag.widthBytes || new Uint8Array('width'.length),
heightBytes = FlvTag.heightBytes || new Uint8Array('height'.length),
videocodecidBytes = FlvTag.videocodecidBytes || new Uint8Array('videocodecid'.length),
i;
if (!FlvTag.widthBytes) {
// calculating the bytes of common metadata names ahead of time makes the
// corresponding writes faster because we don't have to loop over the
// characters
// re-test with test/perf.html if you're planning on changing this
for (i = 0; i < 'width'.length; i++) {
widthBytes[i] = 'width'.charCodeAt(i);
}
for (i = 0; i < 'height'.length; i++) {
heightBytes[i] = 'height'.charCodeAt(i);
}
for (i = 0; i < 'videocodecid'.length; i++) {
videocodecidBytes[i] = 'videocodecid'.charCodeAt(i);
}
FlvTag.widthBytes = widthBytes;
FlvTag.heightBytes = heightBytes;
FlvTag.videocodecidBytes = videocodecidBytes;
}
this.keyFrame = false; // :Boolean
switch(type) {
case FlvTag.VIDEO_TAG:
this.length = 16;
// Start video tags with a larger buffer (16384 * 6 = 96kB)
bufferStartSize *= 6;
break;
case FlvTag.AUDIO_TAG:
this.length = 13;
this.keyFrame = true;
break;
case FlvTag.METADATA_TAG:
this.length = 29;
this.keyFrame = true;
break;
default:
throw("Error Unknown TagType");
}
this.bytes = new Uint8Array(bufferStartSize);
this.view = new DataView(this.bytes.buffer);
this.bytes[0] = type;
this.position = this.length;
this.keyFrame = extraData; // Defaults to false
// presentation timestamp
this.pts = 0;
// decoder timestamp
this.dts = 0;
// ByteArray#writeBytes(bytes:ByteArray, offset:uint = 0, length:uint = 0)
this.writeBytes = function(bytes, offset, length) {
var
start = offset || 0,
end;
length = length || bytes.byteLength;
end = start + length;
prepareWrite(this, length);
this.bytes.set(bytes.subarray(start, end), this.position);
this.position += length;
this.length = Math.max(this.length, this.position);
};
// ByteArray#writeByte(value:int):void
this.writeByte = function(byte) {
prepareWrite(this, 1);
this.bytes[this.position] = byte;
this.position++;
this.length = Math.max(this.length, this.position);
};
// ByteArray#writeShort(value:int):void
this.writeShort = function(short) {
prepareWrite(this, 2);
this.view.setUint16(this.position, short);
this.position += 2;
this.length = Math.max(this.length, this.position);
};
// Negative index into array
// (pos:uint):int
this.negIndex = function(pos) {
return this.bytes[this.length - pos];
};
// The functions below ONLY work when this[0] == VIDEO_TAG.
// We are not going to check for that because we don't want the overhead
// (nal:ByteArray = null):int
this.nalUnitSize = function() {
if (adHoc === 0) {
return 0;
}
return this.length - (adHoc + 4);
};
this.startNalUnit = function() {
// remember position and add 4 bytes
if (adHoc > 0) {
throw new Error("Attempted to create new NAL wihout closing the old one");
}
// reserve 4 bytes for nal unit size
adHoc = this.length;
this.length += 4;
this.position = this.length;
};
// (nal:ByteArray = null):void
this.endNalUnit = function(nalContainer) {
var
nalStart, // :uint
nalLength; // :uint
// Rewind to the marker and write the size
if (this.length === adHoc + 4) {
// we started a nal unit, but didn't write one, so roll back the 4 byte size value
this.length -= 4;
} else if (adHoc > 0) {
nalStart = adHoc + 4;
nalLength = this.length - nalStart;
this.position = adHoc;
this.view.setUint32(this.position, nalLength);
this.position = this.length;
if (nalContainer) {
// Add the tag to the NAL unit
nalContainer.push(this.bytes.subarray(nalStart, nalStart + nalLength));
}
}
adHoc = 0;
};
/**
* Write out a 64-bit floating point valued metadata property. This method is
* called frequently during a typical parse and needs to be fast.
*/
// (key:String, val:Number):void
this.writeMetaDataDouble = function(key, val) {
var i;
prepareWrite(this, 2 + key.length + 9);
// write size of property name
this.view.setUint16(this.position, key.length);
this.position += 2;
// this next part looks terrible but it improves parser throughput by
// 10kB/s in my testing
// write property name
if (key === 'width') {
this.bytes.set(widthBytes, this.position);
this.position += 5;
} else if (key === 'height') {
this.bytes.set(heightBytes, this.position);
this.position += 6;
} else if (key === 'videocodecid') {
this.bytes.set(videocodecidBytes, this.position);
this.position += 12;
} else {
for (i = 0; i < key.length; i++) {
this.bytes[this.position] = key.charCodeAt(i);
this.position++;
}
}
// skip null byte
this.position++;
// write property value
this.view.setFloat64(this.position, val);
this.position += 8;
// update flv tag length
this.length = Math.max(this.length, this.position);
++adHoc;
};
// (key:String, val:Boolean):void
this.writeMetaDataBoolean = function(key, val) {
var i;
prepareWrite(this, 2);
this.view.setUint16(this.position, key.length);
this.position += 2;
for (i = 0; i < key.length; i++) {
// if key.charCodeAt(i) >= 255, handle error
prepareWrite(this, 1);
this.bytes[this.position] = key.charCodeAt(i);
this.position++;
}
prepareWrite(this, 2);
this.view.setUint8(this.position, 0x01);
this.position++;
this.view.setUint8(this.position, val ? 0x01 : 0x00);
this.position++;
this.length = Math.max(this.length, this.position);
++adHoc;
};
// ():ByteArray
this.finalize = function() {
var
dtsDelta, // :int
len; // :int
switch(this.bytes[0]) {
// Video Data
case FlvTag.VIDEO_TAG:
this.bytes[11] = ((this.keyFrame || extraData) ? 0x10 : 0x20 ) | 0x07; // We only support AVC, 1 = key frame (for AVC, a seekable frame), 2 = inter frame (for AVC, a non-seekable frame)
this.bytes[12] = extraData ? 0x00 : 0x01;
dtsDelta = this.pts - this.dts;
this.bytes[13] = (dtsDelta & 0x00FF0000) >>> 16;
this.bytes[14] = (dtsDelta & 0x0000FF00) >>> 8;
this.bytes[15] = (dtsDelta & 0x000000FF) >>> 0;
break;
case FlvTag.AUDIO_TAG:
this.bytes[11] = 0xAF; // 44 kHz, 16-bit stereo
this.bytes[12] = extraData ? 0x00 : 0x01;
break;
case FlvTag.METADATA_TAG:
this.position = 11;
this.view.setUint8(this.position, 0x02); // String type
this.position++;
this.view.setUint16(this.position, 0x0A); // 10 Bytes
this.position += 2;
// set "onMetaData"
this.bytes.set([0x6f, 0x6e, 0x4d, 0x65,
0x74, 0x61, 0x44, 0x61,
0x74, 0x61], this.position);
this.position += 10;
this.bytes[this.position] = 0x08; // Array type
this.position++;
this.view.setUint32(this.position, adHoc);
this.position = this.length;
this.bytes.set([0, 0, 9], this.position);
this.position += 3; // End Data Tag
this.length = this.position;
break;
}
len = this.length - 11;
// write the DataSize field
this.bytes[ 1] = (len & 0x00FF0000) >>> 16;
this.bytes[ 2] = (len & 0x0000FF00) >>> 8;
this.bytes[ 3] = (len & 0x000000FF) >>> 0;
// write the Timestamp
this.bytes[ 4] = (this.dts & 0x00FF0000) >>> 16;
this.bytes[ 5] = (this.dts & 0x0000FF00) >>> 8;
this.bytes[ 6] = (this.dts & 0x000000FF) >>> 0;
this.bytes[ 7] = (this.dts & 0xFF000000) >>> 24;
// write the StreamID
this.bytes[ 8] = 0;
this.bytes[ 9] = 0;
this.bytes[10] = 0;
// Sometimes we're at the end of the view and have one slot to write a
// uint32, so, prepareWrite of count 4, since, view is uint8
prepareWrite(this, 4);
this.view.setUint32(this.length, this.length);
this.length += 4;
this.position += 4;
// trim down the byte buffer to what is actually being used
this.bytes = this.bytes.subarray(0, this.length);
this.frameTime = FlvTag.frameTime(this.bytes);
// if bytes.bytelength isn't equal to this.length, handle error
return this;
};
};
FlvTag.AUDIO_TAG = 0x08; // == 8, :uint
FlvTag.VIDEO_TAG = 0x09; // == 9, :uint
FlvTag.METADATA_TAG = 0x12; // == 18, :uint
// (tag:ByteArray):Boolean {
FlvTag.isAudioFrame = function(tag) {
return FlvTag.AUDIO_TAG === tag[0];
};
// (tag:ByteArray):Boolean {
FlvTag.isVideoFrame = function(tag) {
return FlvTag.VIDEO_TAG === tag[0];
};
// (tag:ByteArray):Boolean {
FlvTag.isMetaData = function(tag) {
return FlvTag.METADATA_TAG === tag[0];
};
// (tag:ByteArray):Boolean {
FlvTag.isKeyFrame = function(tag) {
if (FlvTag.isVideoFrame(tag)) {
return tag[11] === 0x17;
}
if (FlvTag.isAudioFrame(tag)) {
return true;
}
if (FlvTag.isMetaData(tag)) {
return true;
}
return false;
};
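// Note: finalize() above writes 0x17 into byte 11 of a key-frame video
// tag (frame type 1 = key frame, codec id 7 = AVC), which is the value
// isKeyFrame checks for.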
// (tag:ByteArray):uint {
FlvTag.frameTime = function(tag) {
var pts = tag[ 4] << 16; // :uint
pts |= tag[ 5] << 8;
pts |= tag[ 6] << 0;
pts |= tag[ 7] << 24;
return pts;
};
module.exports = FlvTag;
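// Usage sketch (hypothetical values): build a metadata tag, finalize it,
// and read back the serialized bytes.
//
//   var tag = new FlvTag(FlvTag.METADATA_TAG);
//   tag.pts = tag.dts = 0;
//   tag.writeMetaDataDouble('duration', 10);
//   var bytes = tag.finalize().bytes; // a Uint8Array trimmed to length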
},{}],6:[function(require,module,exports){
module.exports = {
tag: require('./flv-tag'),
Transmuxer: require('./transmuxer'),
tools: require('../tools/flv-inspector'),
};
},{"../tools/flv-inspector":17,"./flv-tag":5,"./transmuxer":7}],7:[function(require,module,exports){
'use strict';
var Stream = require('../utils/stream.js');
var FlvTag = require('./flv-tag.js');
var m2ts = require('../m2ts/m2ts.js');
var AdtsStream = require('../codecs/adts.js');
var H264Stream = require('../codecs/h264').H264Stream;
var
MetadataStream,
Transmuxer,
VideoSegmentStream,
AudioSegmentStream,
CoalesceStream,
collectTimelineInfo,
metaDataTag,
extraDataTag;
/**
* Store information about the start and end of the track and the
* duration for each frame/sample we process in order to calculate
* the baseMediaDecodeTime
*/
collectTimelineInfo = function (track, data) {
if (typeof data.pts === 'number') {
if (track.timelineStartInfo.pts === undefined) {
track.timelineStartInfo.pts = data.pts;
} else {
track.timelineStartInfo.pts =
Math.min(track.timelineStartInfo.pts, data.pts);
}
}
if (typeof data.dts === 'number') {
if (track.timelineStartInfo.dts === undefined) {
track.timelineStartInfo.dts = data.dts;
} else {
track.timelineStartInfo.dts =
Math.min(track.timelineStartInfo.dts, data.dts);
}
}
};
metaDataTag = function(track, pts) {
var
tag = new FlvTag(FlvTag.METADATA_TAG); // :FlvTag
tag.dts = pts;
tag.pts = pts;
tag.writeMetaDataDouble("videocodecid", 7);
tag.writeMetaDataDouble("width", track.width);
tag.writeMetaDataDouble("height", track.height);
return tag;
};
extraDataTag = function(track, pts) {
var
i,
tag = new FlvTag(FlvTag.VIDEO_TAG, true);
tag.dts = pts;
tag.pts = pts;
tag.writeByte(0x01);// version
tag.writeByte(track.profileIdc);// profile
tag.writeByte(track.profileCompatibility);// compatibility
tag.writeByte(track.levelIdc);// level
tag.writeByte(0xFC | 0x03); // reserved (6 bits), NALU length size - 1 (2 bits)
tag.writeByte(0xE0 | 0x01 ); // reserved (3 bits), num of SPS (5 bits)
tag.writeShort( track.sps[0].length ); // data of SPS
tag.writeBytes( track.sps[0] ); // SPS
tag.writeByte(track.pps.length); // num of PPS (will there ever be more than 1 PPS?)
for (i = 0 ; i < track.pps.length ; ++i) {
tag.writeShort(track.pps[i].length); // 2 bytes for length of PPS
tag.writeBytes(track.pps[i]); // data of PPS
}
return tag;
};
/**
* Constructs a single-track media segment from AAC data
* events. The output of this stream can be fed to Flash.
*/
AudioSegmentStream = function(track) {
var
adtsFrames = [],
adtsFramesLength = 0,
sequenceNumber = 0,
earliestAllowedDts = 0,
oldExtraData;
AudioSegmentStream.prototype.init.call(this);
this.push = function(data) {
collectTimelineInfo(track, data);
if (track && track.channelcount === undefined) {
track.audioobjecttype = data.audioobjecttype;
track.channelcount = data.channelcount;
track.samplerate = data.samplerate;
track.samplingfrequencyindex = data.samplingfrequencyindex;
track.samplesize = data.samplesize;
track.extraData = (track.audioobjecttype << 11) |
(track.samplingfrequencyindex << 7) |
(track.channelcount << 3);
}
data.pts = Math.round(data.pts / 90);
data.dts = Math.round(data.dts / 90);
// buffer audio data until end() is called
adtsFrames.push(data);
};
this.flush = function() {
var currentFrame, adtsFrame, lastMetaPts, tags = [];
// return early if no audio data has been observed
if (adtsFrames.length === 0) {
this.trigger('done');
return;
}
lastMetaPts = -Infinity;
while (adtsFrames.length) {
currentFrame = adtsFrames.shift();
// write out metadata tags every 1 second so that the decoder
// is re-initialized quickly after seeking into a different
// audio configuration
if (track.extraData !== oldExtraData || currentFrame.pts - lastMetaPts >= 1000) {
adtsFrame = new FlvTag(FlvTag.METADATA_TAG);
adtsFrame.pts = currentFrame.pts;
adtsFrame.dts = currentFrame.dts;
// AAC is always 10
adtsFrame.writeMetaDataDouble("audiocodecid", 10);
adtsFrame.writeMetaDataBoolean("stereo", 2 === track.channelcount);
adtsFrame.writeMetaDataDouble ("audiosamplerate", track.samplerate);
// Is AAC always 16 bit?
adtsFrame.writeMetaDataDouble ("audiosamplesize", 16);
tags.push(adtsFrame);
oldExtraData = track.extraData;
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG, true);
// For audio, DTS is always the same as PTS. We want to set the DTS
// however so we can compare with video DTS to determine approximate
// packet order
adtsFrame.pts = currentFrame.pts;
adtsFrame.dts = currentFrame.dts;
adtsFrame.view.setUint16(adtsFrame.position, track.extraData);
adtsFrame.position += 2;
adtsFrame.length = Math.max(adtsFrame.length, adtsFrame.position);
tags.push(adtsFrame);
lastMetaPts = currentFrame.pts;
}
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG);
adtsFrame.pts = currentFrame.pts;
adtsFrame.dts = currentFrame.dts;
adtsFrame.writeBytes(currentFrame.data);
tags.push(adtsFrame);
}
oldExtraData = null;
this.trigger('data', {track: track, tags: tags});
this.trigger('done');
};
};
AudioSegmentStream.prototype = new Stream();
/**
* Store FlvTags for the h264 stream
* @param track {object} track metadata configuration
*/
VideoSegmentStream = function(track) {
var
sequenceNumber = 0,
nalUnits = [],
nalUnitsLength = 0,
config,
h264Frame;
VideoSegmentStream.prototype.init.call(this);
this.finishFrame = function(tags, frame) {
if (!frame) {
return;
}
// Check if keyframe and the length of tags.
// This makes sure we write metadata on the first frame of a segment.
if (config && track && track.newMetadata &&
(frame.keyFrame || tags.length === 0)) {
// Push extra data on every IDR frame in case we did a stream change + seek
tags.push(metaDataTag(config, frame.pts));
tags.push(extraDataTag(track, frame.pts));
track.newMetadata = false;
}
frame.endNalUnit();
tags.push(frame);
};
this.push = function(data) {
collectTimelineInfo(track, data);
data.pts = Math.round(data.pts / 90);
data.dts = Math.round(data.dts / 90);
// buffer video until flush() is called
nalUnits.push(data);
};
this.flush = function() {
var
currentNal,
tags = [];
// Throw away nalUnits at the start of the byte stream until we find
// the first AUD
while (nalUnits.length) {
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
break;
}
nalUnits.shift();
}
// return early if no video data has been observed
if (nalUnits.length === 0) {
this.trigger('done');
return;
}
while (nalUnits.length) {
currentNal = nalUnits.shift();
// record the track config
if (currentNal.nalUnitType === 'seq_parameter_set_rbsp') {
track.newMetadata = true;
config = currentNal.config;
track.width = config.width;
track.height = config.height;
track.sps = [currentNal.data];
track.profileIdc = config.profileIdc;
track.levelIdc = config.levelIdc;
track.profileCompatibility = config.profileCompatibility;
h264Frame.endNalUnit();
} else if (currentNal.nalUnitType === 'pic_parameter_set_rbsp') {
track.newMetadata = true;
track.pps = [currentNal.data];
h264Frame.endNalUnit();
} else if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {
if (h264Frame) {
this.finishFrame(tags, h264Frame);
}
h264Frame = new FlvTag(FlvTag.VIDEO_TAG);
h264Frame.pts = currentNal.pts;
h264Frame.dts = currentNal.dts;
} else {
if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
// the current sample is a key frame
h264Frame.keyFrame = true;
}
h264Frame.endNalUnit();
}
h264Frame.startNalUnit();
h264Frame.writeBytes(currentNal.data);
}
if (h264Frame) {
this.finishFrame(tags, h264Frame);
}
this.trigger('data', {track: track, tags: tags});
// Continue with the flush process now
this.trigger('done');
};
};
VideoSegmentStream.prototype = new Stream();
/**
* The final stage of the transmuxer that emits the flv tags
* for audio, video, and metadata. Also translates caption data
* and id3 cues in time and outputs them.
*/
CoalesceStream = function(options) {
// Number of Tracks per output segment
// If greater than 1, we combine multiple
// tracks into a single segment
this.numberOfTracks = 0;
this.metadataStream = options.metadataStream;
this.videoTags = [];
this.audioTags = [];
this.videoTrack = null;
this.audioTrack = null;
this.pendingCaptions = [];
this.pendingMetadata = [];
this.pendingTracks = 0;
CoalesceStream.prototype.init.call(this);
// Take output from multiple streams
this.push = function(output) {
// buffer incoming captions until the associated video segment
// finishes
if (output.text) {
return this.pendingCaptions.push(output);
}
// buffer incoming id3 tags until the final flush
if (output.frames) {
return this.pendingMetadata.push(output);
}
if (output.track.type === 'video') {
this.videoTrack = output.track;
this.videoTags = output.tags;
this.pendingTracks++;
}
if (output.track.type === 'audio') {
this.audioTrack = output.track;
this.audioTags = output.tags;
this.pendingTracks++;
}
};
};
CoalesceStream.prototype = new Stream();
CoalesceStream.prototype.flush = function() {
var
id3,
caption,
i,
timelineStartPts,
event = {
tags: {},
captions: [],
metadata: []
};
if (this.pendingTracks < this.numberOfTracks) {
return;
}
if (this.videoTrack) {
timelineStartPts = this.videoTrack.timelineStartInfo.pts;
} else if (this.audioTrack) {
timelineStartPts = this.audioTrack.timelineStartInfo.pts;
}
event.tags.videoTags = this.videoTags;
event.tags.audioTags = this.audioTags;
// Translate caption PTS times into second offsets into the
// video timeline for the segment
for (i = 0; i < this.pendingCaptions.length; i++) {
caption = this.pendingCaptions[i];
caption.startTime = caption.startPts - timelineStartPts;
caption.startTime /= 90e3;
caption.endTime = caption.endPts - timelineStartPts;
caption.endTime /= 90e3;
event.captions.push(caption);
}
// Translate ID3 frame PTS times into second offsets into the
// video timeline for the segment
for (i = 0; i < this.pendingMetadata.length; i++) {
id3 = this.pendingMetadata[i];
id3.cueTime = id3.pts - timelineStartPts;
id3.cueTime /= 90e3;
event.metadata.push(id3);
}
// We add this to every single emitted segment even though we only need
// it for the first
event.metadata.dispatchType = this.metadataStream.dispatchType;
// Reset stream state
this.videoTrack = null;
this.audioTrack = null;
this.videoTags = [];
this.audioTags = [];
this.pendingCaptions.length = 0;
this.pendingMetadata.length = 0;
this.pendingTracks = 0;
// Emit the final segment
this.trigger('data', event);
this.trigger('done');
};
/**
* An object that incrementally transmuxes MPEG2 Transport Stream
* chunks into an FLV.
*/
Transmuxer = function(options) {
var
self = this,
videoTrack,
audioTrack,
packetStream, parseStream, elementaryStream,
adtsStream, h264Stream,
videoSegmentStream, audioSegmentStream, captionStream,
coalesceStream;
Transmuxer.prototype.init.call(this);
options = options || {};
// expose the metadata stream
this.metadataStream = new m2ts.MetadataStream();
options.metadataStream = this.metadataStream;
// set up the parsing pipeline
packetStream = new m2ts.TransportPacketStream();
parseStream = new m2ts.TransportParseStream();
elementaryStream = new m2ts.ElementaryStream();
adtsStream = new AdtsStream();
h264Stream = new H264Stream();
coalesceStream = new CoalesceStream(options);
// disassemble MPEG2-TS packets into elementary streams
packetStream
.pipe(parseStream)
.pipe(elementaryStream);
// !!THIS ORDER IS IMPORTANT!!
// demux the streams
elementaryStream
.pipe(h264Stream);
elementaryStream
.pipe(adtsStream);
elementaryStream
.pipe(this.metadataStream)
.pipe(coalesceStream);
// if CEA-708 parsing is available, hook up a caption stream
captionStream = new m2ts.CaptionStream();
h264Stream.pipe(captionStream)
.pipe(coalesceStream);
// hook up the segment streams once track metadata is delivered
elementaryStream.on('data', function(data) {
var i, videoTrack, audioTrack;
if (data.type === 'metadata') {
i = data.tracks.length;
// scan the tracks listed in the metadata
while (i--) {
if (data.tracks[i].type === 'video') {
videoTrack = data.tracks[i];
} else if (data.tracks[i].type === 'audio') {
audioTrack = data.tracks[i];
}
}
// hook up the video segment stream to the first track with h264 data
if (videoTrack && !videoSegmentStream) {
coalesceStream.numberOfTracks++;
videoSegmentStream = new VideoSegmentStream(videoTrack);
// Set up the final part of the video pipeline
h264Stream
.pipe(videoSegmentStream)
.pipe(coalesceStream);
}
if (audioTrack && !audioSegmentStream) {
// hook up the audio segment stream to the first track with aac data
coalesceStream.numberOfTracks++;
audioSegmentStream = new AudioSegmentStream(audioTrack);
// Set up the final part of the audio pipeline
adtsStream
.pipe(audioSegmentStream)
.pipe(coalesceStream);
}
}
});
// feed incoming data to the front of the parsing pipeline
this.push = function(data) {
packetStream.push(data);
};
// flush any buffered data
this.flush = function() {
// Start at the top of the pipeline and flush all pending work
packetStream.flush();
};
// Re-emit any data coming from the coalesce stream to the outside world
coalesceStream.on('data', function (event) {
self.trigger('data', event);
});
// Let the consumer know we have finished flushing the entire pipeline
coalesceStream.on('done', function () {
self.trigger('done');
});
// For information on the FLV format, see
// http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf.
// Technically, this function returns the header and a metadata FLV tag
// if duration is greater than zero.
// @param duration {number} duration of the stream, in seconds
// @return {Uint8Array} the bytes of the FLV header
this.getFlvHeader = function(duration, audio, video) { // :ByteArray {
var
headBytes = new Uint8Array(3 + 1 + 1 + 4),
head = new DataView(headBytes.buffer),
metadata,
result,
metadataLength;
// default arguments
duration = duration || 0;
audio = audio === undefined ? true : audio;
video = video === undefined ? true : video;
// signature
head.setUint8(0, 0x46); // 'F'
head.setUint8(1, 0x4c); // 'L'
head.setUint8(2, 0x56); // 'V'
// version
head.setUint8(3, 0x01);
// flags
head.setUint8(4, (audio ? 0x04 : 0x00) | (video ? 0x01 : 0x00));
// data offset, should be 9 for FLV v1
head.setUint32(5, headBytes.byteLength);
// init the first FLV tag
if (duration <= 0) {
// no duration available so just write the first field of the first
// FLV tag
result = new Uint8Array(headBytes.byteLength + 4);
result.set(headBytes);
result.set([0, 0, 0, 0], headBytes.byteLength);
return result;
}
// write out the duration metadata tag
metadata = new FlvTag(FlvTag.METADATA_TAG);
metadata.pts = metadata.dts = 0;
metadata.writeMetaDataDouble("duration", duration);
metadataLength = metadata.finalize().length;
result = new Uint8Array(headBytes.byteLength + metadataLength);
result.set(headBytes);
// finalize() trims metadata.bytes down to metadataLength, so the whole
// tag can be copied in directly after the header
result.set(metadata.bytes, headBytes.byteLength);
return result;
};
};
Transmuxer.prototype = new Stream();
// forward compatibility
module.exports = Transmuxer;
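// Usage sketch (segmentBytes is assumed to hold MPEG2-TS data): drive the
// FLV transmuxer by pushing transport stream bytes and flushing.
//
//   var transmuxer = new Transmuxer();
//   transmuxer.on('data', function(segment) {
//     // segment.tags.videoTags and segment.tags.audioTags hold FlvTags
//   });
//   transmuxer.push(new Uint8Array(segmentBytes));
//   transmuxer.flush();
//   var header = transmuxer.getFlvHeader(10, true, true);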
},{"../codecs/adts.js":2,"../codecs/h264":3,"../m2ts/m2ts.js":11,"../utils/stream.js":20,"./flv-tag.js":5}],8:[function(require,module,exports){
'use strict';
var muxjs = {
codecs: require('./codecs'),
mp4: require('./mp4'),
flv: require('./flv'),
mp2t: require('./m2ts'),
};
module.exports = muxjs;
},{"./codecs":4,"./flv":6,"./m2ts":10,"./mp4":14}],9:[function(require,module,exports){
/**
* mux.js
*
* Copyright (c) 2015 Brightcove
* All rights reserved.
*
* Reads in-band caption information from a video elementary
* stream. Captions must follow the CEA-708 standard for injection
* into an MPEG-2 transport stream.
* @see https://en.wikipedia.org/wiki/CEA-708
*/
'use strict';
// -----------------
// Link To Transport
// -----------------
// Supplemental enhancement information (SEI) NAL units have a
// payload type field to indicate how they are to be
// interpreted. CEA-708 caption content is always transmitted with
// payload type 0x04.
var USER_DATA_REGISTERED_ITU_T_T35 = 4,
RBSP_TRAILING_BITS = 128,
Stream = require('../utils/stream');
/**
* Parse a supplemental enhancement information (SEI) NAL unit.
* Stops parsing once a message of type ITU T T35 has been found.
*
* @param bytes {Uint8Array} the bytes of a SEI NAL unit
* @return {object} the parsed SEI payload
* @see Rec. ITU-T H.264, 7.3.2.3.1
*/
var parseSei = function(bytes) {
var
i = 0,
result = {
payloadType: -1,
payloadSize: 0,
},
payloadType = 0,
payloadSize = 0;
// go through the sei_rbsp parsing each individual sei_message
while (i < bytes.byteLength) {
// stop once we have hit the end of the sei_rbsp
if (bytes[i] === RBSP_TRAILING_BITS) {
break;
}
// Parse payload type
while (bytes[i] === 0xFF) {
payloadType += 255;
i++;
}
payloadType += bytes[i++];
// Parse payload size
while (bytes[i] === 0xFF) {
payloadSize += 255;
i++;
}
payloadSize += bytes[i++];
// this sei_message is a 608/708 caption so save it and break
// there can only ever be one caption message in a frame's sei
if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {
result.payloadType = payloadType;
result.payloadSize = payloadSize;
result.payload = bytes.subarray(i, i + payloadSize);
break;
}
// skip the payload and parse the next message
i += payloadSize;
payloadType = 0;
payloadSize = 0;
}
return result;
};
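// For example, a payload size of 300 is coded on the wire as the two
// bytes 0xFF 0x2D; the size loop above accumulates 255 for the 0xFF and
// then adds 45.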
// see ANSI/SCTE 128-1 (2013), section 8.1
var parseUserData = function(sei) {
// itu_t_t35_country_code must be 181 (United States) for
// captions
if (sei.payload[0] !== 181) {
return null;
}
// itu_t_t35_provider_code should be 49 (ATSC) for captions
if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) {
return null;
}
//