@tgsnake/core
Pure Telegram MTProto library for nodejs
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.upload = upload;
exports.uploadStream = uploadStream;
const platform_node_js_1 = require("../platform.node.js");
const Queue_js_1 = require("../Queue.js");
const index_js_1 = require("../errors/index.js");
const index_js_2 = require("../session/index.js");
const index_js_3 = require("../raw/index.js");
const Logger_js_1 = require("../Logger.js");
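/**
 * Uploads an in-memory Buffer to Telegram in 512 KiB parts.
 * Resolves to Raw.InputFile (small files, with an MD5 checksum) or
 * Raw.InputFileBig (files over 10 MiB). When fileId is supplied, only
 * the single missing part at filePart is re-sent and nothing is returned.
 */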
async function upload(client, source, fileName, fileId, filePart = 0, progress) {
const release = await client._saveFileSemaphore.acquire();
try {
const queue = new Queue_js_1.Queue(1);
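// Telegram accepts upload parts of at most 512 KiB, so use the maximum.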
const partSize = 512 * 1024;
const user = client._me?.users.find((user) => user.id === client._me?.fullUser.id);
const premium = user && 'premium' in user ? user.premium : false;
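// Premium accounts may upload up to 4000 MiB per file; others are capped at 2000 MiB.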
const fileSizeLimitMiB = premium ? 4000 : 2000;
const fileSize = platform_node_js_1.Buffer.byteLength(source);
if (fileSize === 0) {
throw new index_js_1.FileErrors.FileUploadZero();
}
if (fileSize > fileSizeLimitMiB * 1024 * 1024) {
throw new index_js_1.FileErrors.FileUploadBigger(fileSizeLimitMiB * 1024 * 1024, fileSize);
}
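// Worker: drains RPC calls from the queue and invokes them on the upload
// session. A null item is the shutdown sentinel.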
const worker = async (session, index) => {
Logger_js_1.Logger.debug(`[142] Worker ${index} running`);
while (true) {
Logger_js_1.Logger.debug(`[143] Worker ${index} waiting on the queue`);
const data = await queue.get();
Logger_js_1.Logger.debug(`[144] Worker ${index} received an item from the queue`);
if (data === null) {
Logger_js_1.Logger.debug(`[145] Worker ${index} finished`);
return;
}
if (data) {
try {
Logger_js_1.Logger.debug(`[146] Worker ${index} sending data from queue`);
await session.invoke(data);
}
catch (error) {
Logger_js_1.Logger.error(`[147] Error while sending a file part:`, error);
}
}
}
};
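// Files above 10 MiB must be sent with upload.saveBigFilePart and may use
// several parallel workers; smaller files go through upload.saveFilePart.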
const fileTotalParts = Math.ceil(fileSize / partSize);
const isBig = fileSize > 10 * 1024 * 1024;
const workersAmount = isBig ? 4 : 1;
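// A caller-supplied fileId means this call only re-uploads one missing part.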
const isMissingPart = fileId !== undefined;
fileId = fileId || platform_node_js_1.Buffer.from(platform_node_js_1.crypto.randomBytes(8)).readBigInt64LE();
const file = new index_js_3.BytesIO(source);
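// The MD5 checksum is attached to InputFile below so the server can verify
// small uploads; big-file uploads skip it.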
const md5 = !isBig && !isMissingPart ? platform_node_js_1.crypto.createHash('md5').update(source).digest('hex') : '';
const session = new index_js_2.Session(client, client._storage.dcId, client._storage.authKey, client._storage.testMode, client._proxy, true);
const workers = Array(workersAmount)
.fill(null)
.map((_, i) => worker(session, i + 1));
try {
await session.start();
file.seek(partSize * filePart);
while (true) {
const chunk = file.read(partSize);
if (!platform_node_js_1.Buffer.byteLength(chunk)) {
break;
}
if (isBig) {
await queue.put(new index_js_3.Raw.upload.SaveBigFilePart({
fileId: fileId,
filePart: filePart,
fileTotalParts: fileTotalParts,
bytes: chunk,
}));
}
else {
await queue.put(new index_js_3.Raw.upload.SaveFilePart({
fileId: fileId,
filePart: filePart,
bytes: chunk,
}));
}
if (isMissingPart) {
return;
}
filePart += 1;
if (progress) {
progress(Math.min(filePart * partSize, fileSize), fileSize);
}
}
if (isBig) {
return new index_js_3.Raw.InputFileBig({
id: fileId,
parts: fileTotalParts,
name: fileName ?? 'file.unknown',
});
}
else {
return new index_js_3.Raw.InputFile({
id: fileId,
parts: fileTotalParts,
name: fileName ?? 'file.unknown',
md5Checksum: md5,
});
}
}
catch (error) {
Logger_js_1.Logger.error('[141] Error while queueing upload RPCs', error);
}
finally {
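// One null sentinel per worker tells it to exit; the trailing put/get pair
// flushes the size-1 queue before the session is stopped.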
for (let _ of workers) {
await queue.put(null);
}
await queue.put(null);
await queue.get();
await session.stop();
}
}
finally {
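// The semaphore's acquire() result differs by runtime: a release function
// on Deno, apparently a [value, release] tuple elsewhere.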
if (platform_node_js_1.isDeno) {
release();
}
else {
release[1]();
}
}
}
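/**
 * Uploads from a readable stream whose total size is unknown up front.
 * Every chunk is sent via upload.saveBigFilePart with fileTotalParts = -1
 * until the final part, when the real count becomes known. Resolves to
 * Raw.InputFileBig.
 */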
async function uploadStream(client, source, fileName, progress) {
if (!source.readable || !source._readableState) {
throw new index_js_1.FileErrors.FileIsNotReadable();
}
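// Note: _readableState is a Node.js internal; forcing its highWaterMark to
// 512 KiB is a best-effort way to get uniformly sized chunks.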
if (source.readableHighWaterMark !== 512 * 1024) {
source._readableState.highWaterMark = 512 * 1024;
}
const release = await client._saveFileSemaphore.acquire();
Logger_js_1.Logger.debug(`[148] Upload stream started.`);
try {
let resolve;
const partSize = 512 * 1024;
let filePart = 0;
let totalStreamSize = 0;
const fileId = platform_node_js_1.Buffer.from(platform_node_js_1.crypto.randomBytes(8)).readBigInt64LE();
let hasEndedBefore = false;
const queue = new Queue_js_1.Queue(1);
const waitUpload = new Promise((res) => {
resolve = res;
});
const user = client._me?.users.find((user) => user.id === client._me?.fullUser.id);
const premium = user && 'premium' in user ? user.premium : false;
const fileSizeLimitMiB = premium ? 4000 : 2000;
const worker = async (session, index) => {
Logger_js_1.Logger.debug(`[149] Worker ${index} running`);
while (true) {
Logger_js_1.Logger.debug(`[150] Worker ${index} waiting on the queue`);
const data = await queue.get();
Logger_js_1.Logger.debug(`[151] Worker ${index} received an item from the queue`);
if (data === null) {
Logger_js_1.Logger.debug(`[152] Worker ${index} finished`);
return;
}
if (data) {
try {
Logger_js_1.Logger.debug(`[153] Worker ${index} sending data from queue`);
await session.invoke(data);
}
catch (error) {
Logger_js_1.Logger.error(`[154] Error while sending a file part:`, error);
}
}
}
};
const session = new index_js_2.Session(client, client._storage.dcId, client._storage.authKey, client._storage.testMode, client._proxy, true);
const workers = Array(4)
.fill(null)
.map((_, i) => worker(session, i + 1));
try {
await session.start();
const uploader = new platform_node_js_1.Writable({
highWaterMark: 512 * 1024,
async write(chunk, encoding, callback) {
totalStreamSize += platform_node_js_1.Buffer.byteLength(chunk);
if (totalStreamSize > fileSizeLimitMiB * 1024 * 1024) {
throw new index_js_1.FileErrors.FileUploadBigger(fileSizeLimitMiB * 1024 * 1024, totalStreamSize);
}
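// A chunk shorter than the 512 KiB high-water mark is assumed to be the
// stream's final part, so the real part count can be filled in.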
if (platform_node_js_1.Buffer.byteLength(chunk) < 512 * 1024) {
hasEndedBefore = true;
await queue.put(new index_js_3.Raw.upload.SaveBigFilePart({
fileId: fileId,
filePart: filePart,
fileTotalParts: Math.ceil(totalStreamSize / partSize),
bytes: platform_node_js_1.Buffer.from(chunk, encoding),
}));
}
else {
await queue.put(new index_js_3.Raw.upload.SaveBigFilePart({
fileId: fileId,
filePart: filePart,
fileTotalParts: -1,
bytes: platform_node_js_1.Buffer.from(chunk, encoding),
}));
}
filePart += 1;
if (progress) {
progress(filePart * partSize, -1);
}
return callback();
},
});
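// If the last chunk was exactly 512 KiB, no short chunk ever arrived:
// flush a zero-length final part carrying the real part count.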
uploader.on('finish', async () => {
if (!hasEndedBefore) {
hasEndedBefore = true;
await queue.put(new index_js_3.Raw.upload.SaveBigFilePart({
fileId: fileId,
filePart: filePart,
fileTotalParts: Math.ceil(totalStreamSize / partSize),
bytes: platform_node_js_1.Buffer.alloc(0),
}));
}
if (progress) {
progress(filePart * partSize, totalStreamSize);
}
resolve(true);
});
source.pipe(uploader);
await waitUpload;
return new index_js_3.Raw.InputFileBig({
id: fileId,
parts: Math.ceil(totalStreamSize / partSize),
name: fileName ?? 'file.unknown',
});
}
catch (error) {
Logger_js_1.Logger.error('[155] Error while queueing upload RPCs', error);
}
finally {
for (let _ of workers) {
await queue.put(null);
}
await queue.put(null);
await queue.get();
await session.stop();
}
}
finally {
if (platform_node_js_1.isDeno) {
release();
}
else {
release[1]();
}
}
}