/**
 * @nuxthub/core — S3-compatible blob storage driver.
 * Build full-stack Nuxt applications, with zero configuration.
 */
import { AwsClient } from "aws4fetch";
import { getContentType } from "../utils.mjs";
import { camelCase, snakeCase } from "scule";
// Extract the inner text of the first <tag>…</tag> pair in `xml`, or
// undefined when the tag is absent. The captured text is run through
// decodeURIComponent — NOTE(review): assumes percent-encoded content; a
// literal stray "%" in the value would throw a URIError — confirm.
const xmlTagContent = (xml, tag) => {
  const found = new RegExp(`<${tag}>([\\s\\S]*?)</${tag}>`).exec(xml);
  if (!found) {
    return void 0;
  }
  return decodeURIComponent(found[1]);
};
// Same as xmlTagContent, but a missing (or empty) tag is an error.
const xmlTagContentRequired = (xml, tag) => {
  const value = xmlTagContent(xml, tag);
  if (value) {
    return value;
  }
  throw new Error(`Missing <${tag}> in XML.`);
};
// Wrap `content` in an XML element named `tag`.
const xmlTag = (tag, content) => `<${tag}>${content}</${tag}>`;
// Parse an S3 ListObjectsV2 XML response into the object entries, the
// common prefixes (folders), and the pagination state.
const parseListResponse = (xml) => {
  const objects = [];
  for (const [, body] of xml.matchAll(/<Contents>([\s\S]*?)<\/Contents>/g)) {
    objects.push({
      key: xmlTagContentRequired(body, "Key"),
      size: Number.parseInt(xmlTagContent(body, "Size") || "0", 10),
      // Missing <LastModified> yields an Invalid Date, matching prior behavior.
      lastModified: new Date(xmlTagContent(body, "LastModified") || ""),
      etag: xmlTagContent(body, "ETag") || ""
    });
  }
  const prefixes = [
    ...xml.matchAll(/<CommonPrefixes>[\s\S]*?<Prefix>([\s\S]*?)<\/Prefix>[\s\S]*?<\/CommonPrefixes>/g)
  ].map((match) => match[1]);
  return {
    objects,
    isTruncated: xmlTagContent(xml, "IsTruncated") === "true",
    nextToken: xmlTagContent(xml, "NextContinuationToken"),
    prefixes
  };
};
// Convert a parsed S3 <Contents> entry into the blob shape exposed by the
// driver's list() API. Listing responses carry no metadata headers, so
// httpMetadata/customMetadata are always empty here.
function mapS3ObjectToBlob(object) {
  const { key, size, etag, lastModified } = object;
  return {
    pathname: key,
    contentType: getContentType(key),
    size,
    httpEtag: etag,
    uploadedAt: lastModified,
    httpMetadata: {},
    customMetadata: {}
  };
}
/**
 * Create an S3-compatible blob storage driver.
 *
 * Every request is SigV4-signed via `aws4fetch` and sent straight to the
 * bucket endpoint; list/multipart XML responses are parsed with the regex
 * helpers above.
 *
 * @param {object} options - Driver configuration.
 * @param {string} [options.endpoint] - Custom S3-compatible endpoint; when
 *   set, path-style addressing is used (bucket appended to the URL path).
 * @param {string} options.bucket - Bucket name.
 * @param {string} options.region - Region for the default URL and signing.
 * @param {string} options.accessKeyId - Access key for request signing.
 * @param {string} options.secretAccessKey - Secret key for request signing.
 * @returns {object} Driver exposing list/get/getArrayBuffer/put/head/
 *   hasItem/delete and multipart-upload entry points.
 */
export function createDriver(options) {
  // No explicit endpoint: virtual-hosted-style AWS URL (bucket in the host).
  const baseEndpoint = options.endpoint ?? `https://${options.bucket}.s3.${options.region}.amazonaws.com`;
  // Explicit endpoint: path-style addressing — the bucket goes in the path.
  const bucketUrl = options.endpoint && options.bucket ? `${baseEndpoint}/${options.bucket}` : baseEndpoint;
  const aws = new AwsClient({
    accessKeyId: options.accessKeyId,
    secretAccessKey: options.secretAccessKey,
    region: options.region,
    service: "s3"
  });
  return {
    name: "s3",
    options,
    /**
     * ListObjectsV2 with optional prefix, pagination cursor, and "folded"
     * mode (delimiter "/"), which also reports common prefixes as folders.
     */
    async list(listOptions) {
      const params = new URLSearchParams({
        "list-type": "2",
        // S3's default page size; 1e3 === 1000.
        "max-keys": String(listOptions?.limit ?? 1e3)
      });
      if (listOptions?.prefix) {
        params.set("prefix", listOptions.prefix);
      }
      if (listOptions?.cursor) {
        params.set("continuation-token", listOptions.cursor);
      }
      if (listOptions?.folded) {
        params.set("delimiter", "/");
      }
      const res = await aws.fetch(`${bucketUrl}?${params}`);
      if (!res.ok) {
        throw new Error(`S3 list failed: ${res.status} ${res.statusText}`);
      }
      const xml = await res.text();
      const { objects, isTruncated, nextToken, prefixes } = parseListResponse(xml);
      return {
        blobs: objects.map(mapS3ObjectToBlob),
        hasMore: isTruncated,
        cursor: nextToken,
        folders: listOptions?.folded ? prefixes : void 0
      };
    },
    /**
     * GET an object as a Blob; returns null when the object does not exist.
     * Pathname is decoded then re-encoded so already-encoded input is not
     * double-encoded. NOTE(review): encodeURI leaves "?" and "#" unescaped,
     * so keys containing them would corrupt the URL — confirm upstream input.
     */
    async get(pathname) {
      const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}`);
      if (res.status === 404) {
        return null;
      }
      if (!res.ok) {
        throw new Error(`S3 get failed: ${res.status} ${res.statusText}`);
      }
      const arrayBuffer = await res.arrayBuffer();
      // Prefer the stored content type; fall back to extension sniffing.
      const contentType = res.headers.get("content-type") || getContentType(pathname);
      return new Blob([arrayBuffer], { type: contentType });
    },
    /** GET an object as a raw ArrayBuffer; null when it does not exist. */
    async getArrayBuffer(pathname) {
      const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}`);
      if (res.status === 404) {
        return null;
      }
      if (!res.ok) {
        throw new Error(`S3 get failed: ${res.status} ${res.statusText}`);
      }
      return res.arrayBuffer();
    },
    /**
     * PUT an object. Accepts File/Blob/ReadableStream/TypedArray/string
     * bodies; everything non-string is normalized to an ArrayBuffer so a
     * Content-Length can be computed for signing.
     */
    async put(pathname, body, putOptions) {
      // Content type priority: explicit option > File/Blob type > extension.
      const contentType = putOptions?.contentType || (body instanceof File || body instanceof Blob ? body.type : void 0) || getContentType(pathname);
      let processedBody;
      if (body instanceof File || body instanceof Blob) {
        processedBody = await body.arrayBuffer();
      } else if (body instanceof ReadableStream) {
        // Drain the stream into memory via a throwaway Response.
        const response = new Response(body);
        processedBody = await response.arrayBuffer();
      } else if (ArrayBuffer.isView(body)) {
        // Copy only the view's window, not the whole underlying buffer.
        processedBody = body.buffer.slice(body.byteOffset, body.byteOffset + body.byteLength);
      } else {
        processedBody = body;
      }
      // Strings are measured in UTF-8 bytes, not UTF-16 code units.
      const contentLength = typeof processedBody === "string" ? new TextEncoder().encode(processedBody).length : processedBody.byteLength;
      const headers = {
        "Content-Type": contentType,
        "Content-Length": String(contentLength)
      };
      if (putOptions?.customMetadata) {
        // Metadata keys are snake_cased and values percent-encoded so they
        // survive the x-amz-meta-* header round trip (decoded in head()).
        for (const [key, value] of Object.entries(putOptions.customMetadata)) {
          headers[`x-amz-meta-${snakeCase(key)}`] = encodeURIComponent(value);
        }
      }
      if (putOptions?.access === "public") {
        headers["x-amz-acl"] = "public-read";
      }
      const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}`, {
        method: "PUT",
        headers,
        body: processedBody
      });
      if (!res.ok) {
        const text = await res.text();
        throw new Error(`S3 put failed: ${res.status} ${res.statusText} ${text}`);
      }
      const etag = res.headers.get("ETag") || "";
      return {
        pathname,
        contentType,
        size: contentLength,
        httpEtag: etag,
        // Upload time is approximated locally; S3 does not return it on PUT.
        uploadedAt: /* @__PURE__ */ new Date(),
        httpMetadata: {},
        customMetadata: putOptions?.customMetadata || {}
      };
    },
    /**
     * HEAD an object: metadata only, no body. Returns null when absent.
     * Custom metadata is reassembled from x-amz-meta-* headers (keys
     * camelCased back, values percent-decoded — the inverse of put()).
     */
    async head(pathname) {
      const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}`, {
        method: "HEAD"
      });
      if (res.status === 404) {
        return null;
      }
      if (!res.ok) {
        throw new Error(`S3 head failed: ${res.status} ${res.statusText}`);
      }
      const contentType = res.headers.get("content-type") || getContentType(pathname);
      const contentLength = Number.parseInt(res.headers.get("content-length") || "0", 10);
      const etag = res.headers.get("etag") || "";
      const lastModified = res.headers.get("last-modified");
      const customMetadata = {};
      res.headers.forEach((value, key) => {
        if (key.toLowerCase().startsWith("x-amz-meta-")) {
          const metaKey = camelCase(key.substring("x-amz-meta-".length));
          customMetadata[metaKey] = decodeURIComponent(value);
        }
      });
      return {
        pathname: decodeURIComponent(pathname),
        contentType,
        size: contentLength,
        httpEtag: etag,
        uploadedAt: lastModified ? new Date(lastModified) : /* @__PURE__ */ new Date(),
        httpMetadata: {},
        customMetadata
      };
    },
    /** Existence check via HEAD; any 2xx counts as present. */
    async hasItem(pathname) {
      const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}`, {
        method: "HEAD"
      });
      return res.ok;
    },
    /**
     * DELETE one object or a batch. A single path uses DeleteObject (404 is
     * tolerated — delete is idempotent); multiple paths use the batch
     * DeleteObjects POST with an XML payload.
     * NOTE(review): batch keys are not XML-escaped; keys containing "&" or
     * "<" would produce invalid XML — confirm upstream key constraints.
     */
    async delete(pathnames) {
      const paths = Array.isArray(pathnames) ? pathnames : [pathnames];
      if (paths.length === 1) {
        const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(paths[0]))}`, {
          method: "DELETE"
        });
        if (!res.ok && res.status !== 404) {
          throw new Error(`S3 delete failed: ${res.status} ${res.statusText}`);
        }
      } else {
        const deleteXml = `<?xml version="1.0" encoding="UTF-8"?>
<Delete>
${paths.map((p) => `<Object><Key>${decodeURIComponent(p)}</Key></Object>`).join("")}
</Delete>`;
        const res = await aws.fetch(`${bucketUrl}?delete`, {
          method: "POST",
          headers: { "Content-Type": "application/xml" },
          body: deleteXml
        });
        if (!res.ok) {
          const text = await res.text();
          throw new Error(`S3 batch delete failed: ${res.status} ${res.statusText} ${text}`);
        }
      }
    },
    /**
     * Initiate a multipart upload (POST ?uploads) and return a handle with
     * uploadPart/abort/complete bound to the new upload id.
     */
    async createMultipartUpload(pathname, mpuOptions) {
      const headers = buildCreateHeaders(mpuOptions);
      const res = await aws.fetch(`${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}?uploads`, {
        method: "POST",
        headers
      });
      if (!res.ok) {
        const text = await res.text();
        throw new Error(`S3 initiate multipart upload failed: ${res.status} ${res.statusText} ${text}`);
      }
      const xml = await res.text();
      const uploadId = xmlTagContentRequired(xml, "UploadId");
      return createMultipartUploadObject(aws, bucketUrl, pathname, uploadId, mpuOptions);
    },
    /** Rebuild a handle for an already-initiated upload from its id. */
    async resumeMultipartUpload(pathname, uploadId) {
      return createMultipartUploadObject(aws, bucketUrl, pathname, uploadId);
    }
  };
}
// Build the headers for CreateMultipartUpload: an optional Content-Type plus
// x-amz-meta-* entries (keys snake_cased, values percent-encoded so they are
// header-safe, mirroring put()).
const buildCreateHeaders = (opts) => {
  const headers = {};
  const { contentType, customMetadata } = opts ?? {};
  if (contentType) {
    headers["Content-Type"] = contentType;
  }
  for (const [key, value] of Object.entries(customMetadata ?? {})) {
    headers[`x-amz-meta-${snakeCase(key)}`] = encodeURIComponent(value);
  }
  return headers;
};
/**
 * Build the multipart-upload handle returned by createMultipartUpload and
 * resumeMultipartUpload.
 *
 * @param {AwsClient} aws - SigV4-signing fetch client.
 * @param {string} bucketUrl - Base URL of the bucket.
 * @param {string} pathname - Object key of the upload target.
 * @param {string} uploadId - S3 multipart upload id.
 * @param {object} [mpuOptions] - Options from the initiating call
 *   (contentType, customMetadata); absent when resuming an upload.
 * @returns {object} Handle with uploadPart / abort / complete methods.
 */
function createMultipartUploadObject(aws, bucketUrl, pathname, uploadId, mpuOptions) {
  // Same decode/encode round trip as the single-object operations above.
  const objectUrl = `${bucketUrl}/${encodeURI(decodeURIComponent(pathname))}`;
  return {
    pathname,
    uploadId,
    /**
     * PUT one part. S3 part numbers start at 1; the returned {partNumber,
     * etag} pair must be passed back to complete().
     */
    async uploadPart(partNumber, value) {
      if (!Number.isInteger(partNumber) || partNumber < 1) {
        throw new Error("partNumber must be a positive integer starting at 1");
      }
      const res = await aws.fetch(
        `${objectUrl}?partNumber=${partNumber}&uploadId=${encodeURIComponent(uploadId)}`,
        {
          method: "PUT",
          body: value
        }
      );
      if (!res.ok) {
        const text = await res.text();
        throw new Error(`S3 upload part ${partNumber} failed: ${res.status} ${res.statusText} ${text}`);
      }
      // The ETag is required later by CompleteMultipartUpload.
      const etag = res.headers.get("ETag");
      if (!etag) {
        throw new Error("Missing ETag on UploadPart response");
      }
      return { partNumber, etag };
    },
    /** Abort the upload so S3 discards the parts stored so far. */
    async abort() {
      const res = await aws.fetch(
        `${objectUrl}?uploadId=${encodeURIComponent(uploadId)}`,
        { method: "DELETE" }
      );
      if (!res.ok) {
        const text = await res.text();
        throw new Error(`S3 abort multipart upload failed: ${res.status} ${res.statusText} ${text}`);
      }
    },
    /**
     * Complete the upload from the parts returned by uploadPart(). Parts are
     * sorted by partNumber because S3 requires ascending order in the XML.
     */
    async complete(uploadedParts) {
      if (!Array.isArray(uploadedParts) || uploadedParts.length === 0) {
        throw new Error("uploadedParts must be a non-empty array");
      }
      // Copy before sorting so the caller's array is not mutated.
      const sortedParts = [...uploadedParts].sort((a, b) => a.partNumber - b.partNumber);
      const res = await aws.fetch(
        `${objectUrl}?uploadId=${encodeURIComponent(uploadId)}`,
        {
          method: "POST",
          headers: { "Content-Type": "application/xml" },
          body: xmlTag(
            "CompleteMultipartUpload",
            sortedParts.map(
              (p) => xmlTag("Part", xmlTag("PartNumber", p.partNumber) + xmlTag("ETag", p.etag))
            ).join("")
          )
        }
      );
      if (!res.ok) {
        const text = await res.text();
        throw new Error(`S3 complete multipart upload failed: ${res.status} ${res.statusText} ${text}`);
      }
      const xml = await res.text();
      const etag = xmlTagContent(xml, "ETag");
      return {
        pathname,
        // contentType/customMetadata are only known when this handle came
        // from createMultipartUpload; resumed uploads fall back to defaults.
        contentType: mpuOptions?.contentType || getContentType(pathname),
        url: xmlTagContent(xml, "Location"),
        httpEtag: etag,
        uploadedAt: /* @__PURE__ */ new Date(),
        httpMetadata: {},
        customMetadata: mpuOptions?.customMetadata || {}
      };
    }
  };
}