ts-ebml-esm
Version: (unspecified)
EBML decoder and encoder
176 lines (175 loc) • 9.31 kB
JavaScript
import { EventEmitter as l } from "node:events";
import { e as h, V as d } from "./tools-CVDSnz1X.js";
/**
 * Streaming EBML (Matroska / WebM) reader.
 *
 * Consumes decoded EBML elements one at a time via read() and emits:
 *   - "segment_offset": data-start offset of the Segment element
 *   - "metadata":       all chunks buffered before the first Cluster, plus metadataSize
 *   - "cluster":        buffered chunks of one Cluster with its timestamp
 *   - "duration":       { timestampScale, duration } estimate
 *   - "cue_info"/"cue": seek-cue data for the first video/audio block of a Cluster
 *   - "cluster_ptr":    tag-start position of each Cluster
 *   - "webp":           WebP Blob built from a VP8 keyframe (when use_webp is set)
 *
 * Minified build note: `l` is node:events EventEmitter; `h` and `d` come from
 * ./tools — presumably h parses a SimpleBlock payload into { timecode,
 * trackNumber, frames } and d converts a VP8 frame into WebP bytes (TODO:
 * confirm against the tools module).
 */
class f extends l {
// All state is initialized in one comma expression (minified output).
// chunks: elements buffered since the last flush; stack: currently-open
// master elements. last2SimpleBlock*Timestamp hold the previous two block
// timecodes per track kind, so the duration getter can estimate the last
// frame's duration. trackTypes/trackDefaultDuration/trackCodecDelay are
// sparse arrays indexed by TrackNumber. timestampScale defaults to 1e6 ns.
constructor() {
super(), this.logGroup = "", this.hasLoggingStarted = !1, this.metadataloaded = !1, this.chunks = [], this.stack = [], this.segmentOffset = 0, this.last2SimpleBlockVideoTrackTimestamp = [0, 0], this.last2SimpleBlockAudioTrackTimestamp = [0, 0], this.lastClusterTimestamp = 0, this.lastClusterPosition = 0, this.timestampScale = 1e6, this.metadataSize = 0, this.metadatas = [], this.cues = [], this.firstVideoBlockRead = !1, this.firstAudioBlockRead = !1, this.currentTrack = {
TrackNumber: -1,
TrackType: -1,
DefaultDuration: null,
CodecDelay: null
}, this.trackTypes = [], this.trackDefaultDuration = [], this.trackCodecDelay = [], this.trackInfo = { type: "nothing" }, this.ended = !1, this.logging = !1, this.use_duration_every_simpleblock = !1, this.use_webp = !1, this.use_segment_info = !0, this.drop_default_duration = !0;
}
/**
 * Emit final state.
 * Marks the stream ended, flushes the pending cluster (or metadata) via
 * emit_segment_info(), unwinds any still-open master elements, and closes
 * the console log groups opened by put() when logging is enabled.
 */
stop() {
for (this.ended = !0, this.emit_segment_info(); this.stack.length; )
this.stack.pop(), this.logging && console.groupEnd();
this.logging && this.hasLoggingStarted && this.logGroup && console.groupEnd();
}
/**
 * Emit chunk info (flush the chunk buffer).
 * First call (metadata not yet flagged loaded): stores the buffered chunks
 * as metadata, derives trackInfo from the first video (TrackType 1) and
 * audio (TrackType 2) track numbers, then emits "metadata".
 * Subsequent calls: emits "cluster" with the buffered chunks and the current
 * lastClusterTimestamp, followed by "duration".
 * When use_segment_info is false no events are emitted, but the first-call
 * state updates (metadataloaded, metadatas, trackInfo) still happen.
 */
emit_segment_info() {
const t = this.chunks;
// Comma operator: the buffer is cleared first, then metadataloaded is tested.
if (this.chunks = [], this.metadataloaded) {
if (!this.use_segment_info)
return;
const i = this.lastClusterTimestamp, e = this.duration, s = this.timestampScale;
this.emit("cluster", { timestamp: i, data: t }), this.emit("duration", { timestampScale: s, duration: e });
} else {
this.metadataloaded = !0, this.metadatas = t;
// indexOf over the TrackNumber-indexed trackTypes array yields the first
// track number of each kind (-1 when absent).
const i = this.trackTypes.indexOf(1), e = this.trackTypes.indexOf(2);
if (this.trackInfo = i >= 0 && e >= 0 ? { type: "both", trackNumber: i } : i >= 0 ? { type: "video", trackNumber: i } : e >= 0 ? { type: "audio", trackNumber: e } : { type: "nothing" }, !this.use_segment_info)
return;
this.emit("metadata", { data: t, metadataSize: this.metadataSize });
}
}
/**
 * Consume one decoded EBML element.
 * t is a decoder-produced tag object (type "m" = master, "u" = uint,
 * "b" = binary) carrying name/isEnd/level/tagStart/dataStart/dataEnd and,
 * depending on type, data or value — TODO: confirm exact shape against the
 * upstream ebml decoder.
 * Local flag i ("drop") suppresses buffering of the current chunk (used to
 * strip DefaultDuration when drop_default_duration is set).
 */
read(t) {
let i = !1;
if (!this.ended) {
// Maintain the open-master-element stack. When a new master starts at the
// same or shallower level than the stack top, the top had an unknown size:
// close it here, fix up its dataEnd/dataSize, and buffer a synthetic
// end-chunk for it before pushing the new master.
if (t.type === "m")
if (t.isEnd)
this.stack.pop();
else {
const e = this.stack[this.stack.length - 1];
if (e != null && e.level >= t.level) {
this.stack.pop(), this.logging && console.groupEnd(), e.dataEnd = t.dataEnd, e.dataSize = t.dataEnd - e.dataStart, e.unknownSize = !1;
const s = Object.assign({}, e, {
name: e.name,
type: e.type,
isEnd: !0
});
this.chunks.push(s);
}
this.stack.push(t);
}
// Segment start: record its data offset (warning if one was already seen).
if (t.type === "m" && t.name === "Segment")
this.segmentOffset !== 0 && console.warn("Multiple segments detected!"), this.segmentOffset = t.dataStart, this.emit("segment_offset", this.segmentOffset);
else if (t.type === "b" && t.name === "SimpleBlock") {
const {
timecode: e,
trackNumber: s,
frames: r
} = h(t.data);
// Video track (TrackType 1): on the first video block of each cluster,
// record a cue (when the cue track is video), then shift the two-slot
// timecode history.
if (this.trackTypes[s] === 1) {
if (!this.firstVideoBlockRead && (this.firstVideoBlockRead = !0, this.trackInfo.type === "both" || this.trackInfo.type === "video")) {
const o = this.lastClusterTimestamp + e;
// NOTE(review): "cue_info" carries lastClusterTimestamp as CueTime
// while the stored cue and the "cue" event carry the block-adjusted
// time (cluster timestamp + block timecode) — looks deliberate, but
// verify against consumers.
this.cues.push({
CueTrack: s,
CueClusterPosition: this.lastClusterPosition,
CueTime: o
}), this.emit("cue_info", {
CueTrack: s,
CueClusterPosition: this.lastClusterPosition,
CueTime: this.lastClusterTimestamp
}), this.emit("cue", {
CueTrack: s,
CueClusterPosition: this.lastClusterPosition,
CueTime: o
});
}
this.last2SimpleBlockVideoTrackTimestamp = [
this.last2SimpleBlockVideoTrackTimestamp[1],
e
];
// Audio track (TrackType 2): same as above, but cues are only recorded
// for audio-only streams.
} else if (this.trackTypes[s] === 2) {
if (!this.firstAudioBlockRead && (this.firstAudioBlockRead = !0, this.trackInfo.type === "audio")) {
const o = this.lastClusterTimestamp + e;
this.cues.push({
CueTrack: s,
CueClusterPosition: this.lastClusterPosition,
CueTime: o
}), this.emit("cue_info", {
CueTrack: s,
CueClusterPosition: this.lastClusterPosition,
CueTime: this.lastClusterTimestamp
}), this.emit("cue", {
CueTrack: s,
CueClusterPosition: this.lastClusterPosition,
CueTime: o
});
}
this.last2SimpleBlockAudioTrackTimestamp = [
this.last2SimpleBlockAudioTrackTimestamp[1],
e
];
}
// Comma operator: optionally emit "duration" per SimpleBlock (side
// effect), then the if-condition itself is just use_webp.
if (this.use_duration_every_simpleblock && this.emit("duration", {
timestampScale: this.timestampScale,
duration: this.duration
}), this.use_webp)
for (const o of r) {
// 9d 01 2a at bytes 3–5 is the VP8 keyframe start code; the loop
// stops (break, not continue) at the first non-keyframe frame —
// NOTE(review): assumes o is a Buffer (subarray(..).toString("hex")).
if (o.subarray(3, 6).toString("hex") !== "9d012a")
break;
const c = d(o), n = new Blob([c], { type: "image/webp" }), u = this.duration;
this.emit("webp", { currentTime: u, webp: n });
}
// Remaining element dispatch (one ternary chain):
//   Cluster start  — reset first-block flags, flush the previous cluster,
//                    emit "cluster_ptr", remember the cluster position;
//   Timestamp / TimestampScale — update cluster timestamp / scale;
//   TrackEntry end — commit currentTrack into the per-track arrays;
//   TrackEntry start — reset currentTrack;
//   TrackType / TrackNumber / CodecDelay — fill currentTrack;
//   DefaultDuration — either dropped (i = true, chunk not buffered) or
//                     stored, per drop_default_duration;
//   unknown elements are logged.
} else t.type === "m" && t.name === "Cluster" && !t.isEnd ? (this.firstVideoBlockRead = !1, this.firstAudioBlockRead = !1, this.emit_segment_info(), this.emit("cluster_ptr", t.tagStart), this.lastClusterPosition = t.tagStart) : t.type === "u" && t.name === "Timestamp" ? this.lastClusterTimestamp = t.value : t.type === "u" && t.name === "TimestampScale" ? this.timestampScale = t.value : t.type === "m" && t.name === "TrackEntry" ? t.isEnd ? (this.trackTypes[this.currentTrack.TrackNumber] = this.currentTrack.TrackType, this.trackDefaultDuration[this.currentTrack.TrackNumber] = this.currentTrack.DefaultDuration, this.trackCodecDelay[this.currentTrack.TrackNumber] = this.currentTrack.CodecDelay) : this.currentTrack = {
TrackNumber: -1,
TrackType: -1,
DefaultDuration: null,
CodecDelay: null
} : t.type === "u" && t.name === "TrackType" ? this.currentTrack.TrackType = t.value : t.type === "u" && t.name === "TrackNumber" ? this.currentTrack.TrackNumber = t.value : t.type === "u" && t.name === "CodecDelay" ? this.currentTrack.CodecDelay = t.value : t.type === "u" && t.name === "DefaultDuration" ? this.drop_default_duration ? (console.warn("DefaultDuration detected!, remove it"), i = !0) : this.currentTrack.DefaultDuration = t.value : t.name === "unknown" && console.warn(t);
// Before metadata is complete, metadataSize tracks the furthest dataEnd
// seen; then buffer the chunk unless dropped, and log it when logging.
!this.metadataloaded && t.dataEnd > 0 && (this.metadataSize = t.dataEnd), i || this.chunks.push(t), this.logging && this.put(t);
}
}
/**
 * Duration estimate in timestampScale units.
 * If DefaultDuration is defined for the reference track, the last frame's
 * duration is taken from it (nanoseconds); otherwise it is estimated from
 * the gap between the last two SimpleBlock timecodes of the most advanced
 * track, minus that track's CodecDelay.
 *
 * !!! if you need duration with seconds !!!
 * ```js
 * const nanosec = reader.duration * reader.timestampScale;
 * const sec = nanosec / 1000 / 1000 / 1000;
 * ```
 */
get duration() {
if (this.trackInfo.type === "nothing")
return console.warn("no video, no audio track"), 0;
// t: last-frame duration in ns; i: codec delay in ns; e: last block
// timecode in timestampScale units.
let t = 0, i = 0, e = 0;
const s = this.trackDefaultDuration[this.trackInfo.trackNumber];
if (typeof s == "number")
t = s;
else if (this.trackInfo.type === "both")
// With both tracks, follow whichever kind has the later last timecode.
if (this.last2SimpleBlockAudioTrackTimestamp[1] > this.last2SimpleBlockVideoTrackTimestamp[1]) {
t = (this.last2SimpleBlockAudioTrackTimestamp[1] - this.last2SimpleBlockAudioTrackTimestamp[0]) * this.timestampScale;
const a = this.trackCodecDelay[this.trackTypes.indexOf(2)];
typeof a == "number" && (i = a), e = this.last2SimpleBlockAudioTrackTimestamp[1];
} else {
t = (this.last2SimpleBlockVideoTrackTimestamp[1] - this.last2SimpleBlockVideoTrackTimestamp[0]) * this.timestampScale;
const a = this.trackCodecDelay[this.trackTypes.indexOf(1)];
typeof a == "number" && (i = a), e = this.last2SimpleBlockVideoTrackTimestamp[1];
}
else if (this.trackInfo.type === "video") {
t = (this.last2SimpleBlockVideoTrackTimestamp[1] - this.last2SimpleBlockVideoTrackTimestamp[0]) * this.timestampScale;
const a = this.trackCodecDelay[this.trackInfo.trackNumber];
typeof a == "number" && (i = a), e = this.last2SimpleBlockVideoTrackTimestamp[1];
} else if (this.trackInfo.type === "audio") {
t = (this.last2SimpleBlockAudioTrackTimestamp[1] - this.last2SimpleBlockAudioTrackTimestamp[0]) * this.timestampScale;
const a = this.trackCodecDelay[this.trackInfo.trackNumber];
typeof a == "number" && (i = a), e = this.last2SimpleBlockAudioTrackTimestamp[1];
}
// Combine in ns, subtract codec delay, convert back to timestampScale units.
const o = ((this.lastClusterTimestamp + e) * this.timestampScale + t - i) / this.timestampScale;
return Math.floor(o);
}
// Pure pass-through to EventEmitter.addListener — presumably kept so the
// original TypeScript source could narrow the event-name/listener types.
addListener(t, i) {
return super.addListener(t, i);
}
// Console-logging helper: opens the outer collapsed group lazily on first
// use, then mirrors the element tree with console.group/groupEnd.
put(t) {
this.hasLoggingStarted || (this.hasLoggingStarted = !0, this.logging && this.logGroup && console.groupCollapsed(this.logGroup)), t.type === "m" ? t.isEnd ? console.groupEnd() : console.group(t.name + ":" + t.tagStart) : t.type === "b" ? console.log(t.name, t.type) : console.log(t.name, t.tagStart, t.type, t.value);
}
}
export {
f as default
};