tus-js-client-stall-detection
A pure JavaScript client for the tus resumable upload protocol (fork with stall detection)
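A minimal usage sketch follows, assuming the standard tus-js-client `Upload` API; the import path for this fork's Node HTTP stack is an assumption and may differ:

import fs from 'node:fs'
import { Upload } from 'tus-js-client'
import { NodeHttpStack } from './lib/node/httpStack.js' // import path is an assumption

// Read the file into a Buffer; Buffers are accepted directly by tus-js-client in Node.js.
const file = fs.readFileSync('./video.mp4')

const upload = new Upload(file, {
  endpoint: 'https://tusd.tusdemo.net/files/',
  // Extra http.RequestOptions are merged into every request made by this stack.
  httpStack: new NodeHttpStack({ timeout: 30_000 }),
  onProgress: (bytesUploaded, bytesTotal) => console.log(`${bytesUploaded}/${bytesTotal}`),
  onError: (err) => console.error('Upload failed:', err),
  onSuccess: () => console.log('Upload finished:', upload.url),
})

upload.start()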
// The url.parse method is superseded by the url.URL constructor,
// but it is still included in Node.js
import * as http from 'node:http'
import * as https from 'node:https'
import * as net from 'node:net'
import { Readable, Transform, type Writable } from 'node:stream'
import { parse } from 'node:url'
import isStream from 'is-stream'
import throttle from 'lodash.throttle'
import type {
HttpProgressHandler,
HttpRequest,
HttpResponse,
HttpStack,
SliceType,
} from '../options.js'
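// Request options passed to the constructor are merged into every outgoing
// request. For example, a keep-alive agent can be supplied with
// `new NodeHttpStack({ agent: new https.Agent({ keepAlive: true }) })`.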
export class NodeHttpStack implements HttpStack {
private _requestOptions: http.RequestOptions
constructor(requestOptions: http.RequestOptions = {}) {
this._requestOptions = requestOptions
}
createRequest(method: string, url: string) {
return new Request(method, url, this._requestOptions)
}
getName() {
return 'NodeHttpStack'
}
supportsProgressEvents(): boolean {
// Node.js HTTP stack supports progress tracking through streams
return true
}
}
class Request implements HttpRequest {
private _method: string
private _url: string
private _headers: Record<string, string> = {}
private _request: http.ClientRequest | null = null
private _progressHandler: HttpProgressHandler = () => {}
private _requestOptions: http.RequestOptions
constructor(method: string, url: string, options: http.RequestOptions) {
this._method = method
this._url = url
this._requestOptions = options
}
getMethod() {
return this._method
}
getURL() {
return this._url
}
setHeader(header: string, value: string) {
this._headers[header] = value
}
getHeader(header: string) {
return this._headers[header]
}
setProgressHandler(progressHandler: HttpProgressHandler) {
this._progressHandler = progressHandler
}
async send(body?: SliceType): Promise<HttpResponse> {
let nodeBody: Readable | Uint8Array | undefined
if (body != null) {
if (body instanceof Blob) {
nodeBody = new Uint8Array(await body.arrayBuffer())
} else if (body instanceof Uint8Array) {
nodeBody = body
} else if (ArrayBuffer.isView(body)) {
// Any typed array other than Uint8Array, or a DataView
nodeBody = new Uint8Array(body.buffer, body.byteOffset, body.byteLength)
} else if (isStream.readable(body)) {
nodeBody = body
} else {
throw new Error(
// @ts-expect-error According to the types, this case cannot happen. But
// we still want to try logging the constructor if this code is reached by accident.
`Unsupported HTTP request body type in Node.js HTTP stack: ${typeof body} (constructor: ${body?.constructor?.name})`,
)
}
}
return new Promise((resolve, reject) => {
const options = {
...parse(this._url),
...this._requestOptions,
method: this._method,
headers: {
...(this._requestOptions.headers || {}),
...this._headers,
},
}
// TODO: What to do here?
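// Blobs and Files expose their byte length via `size`. If no Content-Length
// header is set, Node.js sends the body using chunked transfer encoding.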
// @ts-expect-error We still have to type `size` for `body`
if (body?.size) {
// @ts-expect-error We still have to type `size` for `body`
options.headers['Content-Length'] = body.size
}
const httpModule = options.protocol === 'https:' ? https : http
this._request = httpModule.request(options)
const req = this._request
req.on('response', (res) => {
const resChunks: Buffer[] = []
res.on('data', (data: Buffer) => {
resChunks.push(data)
})
res.on('end', () => {
const responseText = Buffer.concat(resChunks).toString('utf8')
resolve(new Response(res, responseText))
})
})
req.on('error', (err) => {
reject(err)
})
if (nodeBody instanceof Readable) {
// Readable streams are piped through a ProgressEmitter transform, which
// counts the number of bytes passed through. This is used, for example,
// when an fs.ReadStream is provided to tus-js-client.
nodeBody.pipe(new ProgressEmitter(this._progressHandler)).pipe(req)
} else if (nodeBody instanceof Uint8Array) {
// For Buffers and Uint8Arrays (in Node.js all Buffers are instances of Uint8Array),
// we write chunks of the buffer to the stream and use that to track the progress.
// This is used when a Buffer, Blob, or other typed array is provided
// to tus-js-client.
writeBufferToStreamWithProgress(req, nodeBody, this._progressHandler)
} else {
req.end()
}
})
}
abort() {
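// Note: ClientRequest#abort() is deprecated in current Node.js versions;
// request.destroy() is the recommended replacement.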
if (this._request != null) this._request.abort()
return Promise.resolve()
}
getUnderlyingObject() {
return this._request
}
}
class Response implements HttpResponse {
private _response: http.IncomingMessage
private _body: string
constructor(res: http.IncomingMessage, body: string) {
this._response = res
this._body = body
}
getStatus() {
if (this._response.statusCode === undefined) {
throw new Error('no status code available yet')
}
return this._response.statusCode
}
getHeader(header: string) {
const values = this._response.headers[header.toLowerCase()]
if (Array.isArray(values)) {
return values.join(', ')
}
return values
}
getBody() {
return this._body
}
getUnderlyingObject() {
return this._response
}
}
// ProgressEmitter is a simple PassThrough-style transform stream which keeps
// track of the number of bytes which have been piped through it and will
// invoke the `onprogress` function whenever a new byte count is available.
class ProgressEmitter extends Transform {
private _onprogress: HttpProgressHandler
private _position = 0
constructor(onprogress: HttpProgressHandler) {
super()
// The _onprogress property will be invoked whenever a chunk is piped
// through this transformer. Since chunks are usually quite small (64 KiB),
// these calls can occur frequently, especially when you have a good
// connection to the remote server. Therefore, we throttle them to
// prevent excessive function calls. We use trailing: true to ensure
// the final progress event is always reported.
this._onprogress = throttle(onprogress, 100, {
leading: true,
trailing: true,
})
// Report initial progress immediately
this._onprogress(0)
}
_transform(
chunk: Buffer,
_encoding: string,
callback: (err: Error | null, data: Buffer) => void,
) {
this._position += chunk.length
this._onprogress(this._position)
callback(null, chunk)
}
}
// writeBufferToStreamWithProgress writes chunks from `source` (either a
// Buffer or Uint8Array) to the writable stream `stream`.
// The size of each chunk depends on the stream's highWaterMark to fill the
// stream's internal buffer as well as possible.
// If the internal buffer is full, writing pauses and resumes once the buffer
// has been flushed, as indicated by the emitted `drain` event.
// Progress is reported through `onprogress` by polling the socket's
// `bytesWritten`, so it reflects the bytes actually sent over the network.
// See https://nodejs.org/docs/latest/api/stream.html#buffering for more details
// on the buffering behavior of streams.
function writeBufferToStreamWithProgress(
stream: Writable,
source: Uint8Array,
onprogress: HttpProgressHandler,
) {
// Don't throttle progress events here, so the stall detector gets accurate
// updates. The user-facing progress events can handle their own throttling if needed.
let offset = 0
let lastReportedBytes = 0
// Report initial progress
onprogress(0)
// For HTTP requests, we can track actual bytes sent via the socket
const request = stream as http.ClientRequest
// Set up socket monitoring to track actual network progress
function setupSocketMonitoring(socket: net.Socket) {
// Poll the socket's bytesWritten to track actual network progress
const progressInterval = setInterval(() => {
const currentBytes = socket.bytesWritten
// Only report progress if bytes have actually been sent
if (currentBytes > lastReportedBytes) {
lastReportedBytes = currentBytes
// Report the actual bytes sent, capped by our total size
const progress = Math.min(currentBytes, source.length)
onprogress(progress)
}
// Stop monitoring once we've sent all data
if (currentBytes >= source.length || socket.destroyed) {
clearInterval(progressInterval)
}
}, 50) // Check every 50ms for progress
// Clean up on socket close
socket.once('close', () => {
clearInterval(progressInterval)
})
}
// Monitor the socket for actual network progress
if (request.socket) {
setupSocketMonitoring(request.socket)
} else {
request.once('socket', (socket) => {
setupSocketMonitoring(socket)
})
}
function writeNextChunk() {
// Take at most highWaterMark bytes per chunk. This should already fill the
// stream's internal buffer.
const chunkSize = Math.min(stream.writableHighWaterMark, source.length - offset)
// Note: We use subarray instead of slice because it works without copying data for
// Buffers and Uint8Arrays.
const chunk = source.subarray(offset, offset + chunkSize)
offset += chunk.length
const canContinue = stream.write(chunk)
if (!canContinue) {
// The stream buffer is full. Wait for the drain event before writing more data.
stream.once('drain', writeNextChunk)
} else if (offset < source.length) {
// Use setImmediate to avoid blocking the event loop
setImmediate(writeNextChunk)
} else {
// All data has been written to the stream buffer
stream.end()
}
}
writeNextChunk()
}