// filefive: SFTP/FTP/Amazon S3 client and dual-panel file manager for macOS and Linux
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ATTRIBUTES = void 0;
const node_path_1 = require("node:path");
const node_fs_1 = require("node:fs");
const path_1 = require("../utils/path");
const FileSystem_1 = require("../FileSystem");
const client_s3_1 = require("@aws-sdk/client-s3");
// https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/client/s3/
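// Attribute columns the file panels display for S3 entries.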
exports.ATTRIBUTES = [
{
name: "name",
type: FileSystem_1.FileAttributeType.String,
title: "Name"
},
{
name: "size",
type: FileSystem_1.FileAttributeType.Number,
title: "Size"
},
{
name: "modified",
type: FileSystem_1.FileAttributeType.Date,
title: "Last Modified"
}
];
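// Presents S3 as a hierarchical file system: buckets act as top-level
// directories, and key prefixes ending in '/' act as subdirectories.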
class S3 extends FileSystem_1.FileSystem {
constructor(host = 'https://s3.amazonaws.com', // the global endpoint (auto-detects the bucket's region)
accessKeyId, secretAccessKey, region = 'us-east-1', // the default region, particularly for legacy or unspecified requests
port = 443, onError = (e) => { }, onClose = () => { }) {
super();
this.host = host;
this.accessKeyId = accessKeyId;
this.secretAccessKey = secretAccessKey;
this.region = region;
this.port = port;
this.onError = onError;
this.onClose = onClose;
}
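    // Build the S3 client. Construction errors are routed to onError and the
    // promise still resolves true; opened() reflects whether a client exists.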
async open() {
try {
this.connection = new client_s3_1.S3Client({
endpoint: `${this.host}:${this.port}`,
region: this.region,
forcePathStyle: true, // Needed for MinIO and some other non-AWS services
credentials: {
accessKeyId: this.accessKeyId,
secretAccessKey: this.secretAccessKey
}
});
}
catch (e) {
this.onError(e);
}
return Promise.resolve(true);
}
close() {
this.connection && this.onClose();
this.connection?.destroy();
this.connection = null;
}
opened() {
return this.connection != null;
}
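    // S3 has no working directory; the virtual tree is always rooted at '/'.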
async pwd() {
return Promise.resolve('/');
}
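    // List a directory. The root ('/') lists buckets; inside a bucket,
    // ListObjects with Delimiter '/' yields CommonPrefixes (subdirectories)
    // and Contents (objects). This uses the legacy V1 API and reads a single
    // page of at most 1000 keys, so larger directories come back truncated.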
async ls(dir) {
const parts = (0, path_1.split)(dir);
if (!parts.length) {
return this.listBuckets();
}
const target = parts.length > 1 ? parts.slice(1).join('/') + '/' : undefined;
const output = await this.connection.send(new client_s3_1.ListObjectsCommand({
Bucket: parts[0],
MaxKeys: 1000,
Delimiter: '/',
Prefix: target
}));
return [
...(output.CommonPrefixes ?? []).map(prefix => ({
path: (0, node_path_1.resolve)('/', parts[0], prefix.Prefix),
name: (0, node_path_1.basename)(prefix.Prefix),
dir: true,
size: 0,
modified: new Date()
})),
...(output.Contents ?? [])
.filter(({ Key }) => Key != target)
.map(item => ({
path: (0, node_path_1.resolve)('/', parts[0], item.Key),
name: (0, node_path_1.basename)(item.Key),
dir: false,
size: item.Size,
modified: new Date(item.LastModified)
}))
];
}
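    // Download an object by streaming its Body into a local file.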
async get(fromRemote, toLocal) {
const parts = (0, path_1.split)(fromRemote);
const output = await this.connection.send(new client_s3_1.GetObjectCommand({
Bucket: parts[0],
Key: parts.slice(1).join('/')
}));
if (output.Body) {
const body = output.Body;
const writeStream = (0, node_fs_1.createWriteStream)(toLocal);
body.pipe(writeStream);
            return new Promise((resolve, reject) => {
                body.on('error', reject); // fail on read-side errors too, not only write-side
                writeStream.on('finish', resolve);
                writeStream.on('error', reject);
            });
}
return Promise.resolve();
}
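    // Upload a local file with S3's multipart API, sending roughly 5 MiB
    // parts. Note a failed transfer is not aborted here, so incomplete
    // multipart uploads can linger unless the bucket has an abort lifecycle rule.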
async put(fromLocal, toRemote) {
const parts = (0, path_1.split)(toRemote);
const { UploadId } = await this.connection.send(new client_s3_1.CreateMultipartUploadCommand({
Bucket: parts[0],
Key: parts.slice(1).join('/')
}));
        const partSize = 5 * 1024 * 1024; // 5 MiB: the smallest part size S3 accepts for all but the last part
        const fromStream = (0, node_fs_1.createReadStream)(fromLocal, { highWaterMark: partSize });
        let buffer = [];
        let buffered = 0; // bytes accumulated so far (buffer.length would count chunks, not bytes)
        let partNumber = 1;
        const uploadParts = [];
        const uploadPart = async () => {
            const { ETag } = await this.connection.send(new client_s3_1.UploadPartCommand({
                Bucket: parts[0],
                Key: parts.slice(1).join('/'),
                PartNumber: partNumber,
                UploadId,
                Body: Buffer.concat(buffer)
            }));
            uploadParts.push({ ETag, PartNumber: partNumber });
            partNumber++;
            buffer = [];
            buffered = 0;
        };
        for await (const chunk of fromStream) {
            buffer.push(chunk);
            buffered += chunk.length;
            if (buffered >= partSize) {
                await uploadPart();
            }
        }
        if (buffered > 0) {
            await uploadPart();
        }
await this.connection.send(new client_s3_1.CompleteMultipartUploadCommand({
Bucket: parts[0],
Key: parts.slice(1).join('/'),
UploadId,
MultipartUpload: { Parts: uploadParts }
}));
}
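    // Delete a single object; for a directory the trailing '/' targets only
    // its placeholder key. Recursing into contents is the caller's job
    // (see recursiveRm in mv below).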
async rm(path, recursive) {
const parts = (0, path_1.split)(path);
await this.connection.send(new client_s3_1.DeleteObjectCommand({
Bucket: parts[0],
Key: parts.slice(1).join('/') + (recursive ? '/' : '')
}));
}
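    // S3 has no real directories: emulate one with an empty object whose key
    // ends in '/'.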
async mkdir(path) {
const parts = (0, path_1.split)(path);
try {
await this.connection.send(new client_s3_1.PutObjectCommand({
Bucket: parts[0],
Key: parts.slice(1).join('/') + '/',
Body: ''
}));
}
catch (e) {
this.onError(e);
}
}
async rename(from, to) {
return this.mv(from, to);
}
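    // S3 cannot move or rename in place: copy to the destination, then delete
    // the source, recursing through a directory's contents first.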
async mv(from, to) {
const isDir = (await this.ls((0, node_path_1.dirname)(from))).find(({ path }) => path == from)?.dir ?? false;
await this.cp(from, to, isDir);
const recursiveRm = async (path) => {
await Promise.all((await this.ls(path)).map(f => f.dir ? recursiveRm(f.path) : this.rm(f.path, false)));
await this.rm(path, true);
};
await (isDir ? recursiveRm(from) : this.rm(from, isDir));
}
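    // Server-side copy. Directories are copied by recursing over their
    // listing; single objects go through CopyObject, so data never leaves S3.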
async cp(from, to, recursive) {
if (recursive) {
await Promise.all((await this.ls(from)).map(f => this.cp(f.path, (0, node_path_1.resolve)(to, f.name), f.dir)));
}
        else {
            const fromParts = (0, path_1.split)(from);
            const toParts = (0, path_1.split)(to);
            await this.connection.send(new client_s3_1.CopyObjectCommand({
                Bucket: toParts[0], // the destination bucket; CopyObject's Bucket names the copy target
                CopySource: fromParts.map(encodeURIComponent).join('/'), // "source-bucket/source-key", URL-encoded as CopyObject expects
                Key: toParts.slice(1).join('/')
            }));
        }
}
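    // Write in-memory content directly to an object key.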
async write(path, s) {
const parts = (0, path_1.split)(path);
try {
await this.connection.send(new client_s3_1.PutObjectCommand({
Bucket: parts[0],
Key: parts.slice(1).join('/'),
Body: s
}));
}
catch (e) {
this.onError(e);
}
}
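    // Root listing: every bucket visible to the credentials appears as a
    // directory under '/'.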
async listBuckets() {
const output = await this.connection.send(new client_s3_1.ListBucketsCommand({}));
return (output.Buckets ?? []).map(bucket => ({
path: (0, node_path_1.join)('/', bucket.Name),
name: bucket.Name,
dir: true,
size: 0,
modified: bucket.CreationDate ? new Date(bucket.CreationDate) : new Date()
}));
}
}
exports.default = S3;
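// A minimal usage sketch, assuming a reachable endpoint and valid credentials.
// ACCESS_KEY_ID, SECRET_ACCESS_KEY and the bucket path are illustrative
// placeholders, not part of filefive's configuration:
//
//   const s3 = new S3('https://s3.amazonaws.com', ACCESS_KEY_ID, SECRET_ACCESS_KEY);
//   await s3.open();
//   const entries = await s3.ls('/my-bucket/photos');
//   await s3.get('/my-bucket/photos/cat.jpg', '/tmp/cat.jpg');
//   s3.close();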