bun-types
Version:
Type definitions and documentation for Bun, an incredibly fast JavaScript runtime
1,769 lines (1,658 loc) • 212 kB
TypeScript
/**
* Bun.js runtime APIs
*
* @example
*
* ```js
* import {file} from 'bun';
*
* // Log the file to the console
* const input = await file('/path/to/file.txt').text();
* console.log(input);
* ```
*
* This module aliases `globalThis.Bun`.
*/
declare module "bun" {
import type { FFIFunctionCallableSymbol } from "bun:ffi";
import type { Encoding as CryptoEncoding } from "crypto";
import type {
CipherNameAndProtocol,
EphemeralKeyInfo,
PeerCertificate,
} from "tls";
import type { Stats } from "node:fs";
import type { X509Certificate } from "node:crypto";
interface Env {
NODE_ENV?: string;
/**
* Can be used to change the default timezone at runtime
*/
TZ?: string;
}
/**
* The environment variables of the process
*
* Defaults to `process.env` as it was when the current Bun process launched.
*
* Changes to `process.env` at runtime won't automatically be reflected in the default value. For that, you can pass `process.env` explicitly.
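*
* @example
* A minimal sketch; the value printed depends on your environment:
* ```ts
* import { env } from "bun";
* console.log(env.NODE_ENV); // e.g. "development"
* ```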
*/
const env: NodeJS.ProcessEnv;
/**
* The raw arguments passed to the process, including flags passed to Bun. If you want to easily read flags passed to your script, consider using `process.argv` instead.
*/
const argv: string[];
const origin: string;
/**
* Find the path to an executable, similar to typing which in your terminal. Reads the `PATH` environment variable unless overridden with `options.PATH`.
*
* @param {string} command The name of the executable or script
* @param {string} options.PATH Overrides the PATH environment variable
* @param {string} options.cwd If `command` is a relative path, resolve it relative to this directory.
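*
* @example
* A usage sketch; the resolved path is illustrative:
* ```ts
* const ls = Bun.which("ls"); // e.g. "/usr/bin/ls", or null if not found
* const custom = Bun.which("ls", { PATH: "/usr/local/bin" });
* ```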
*/
function which(
command: string,
options?: { PATH?: string; cwd?: string },
): string | null;
/**
* Get the column count of a string as it would be displayed in a terminal.
* Supports ANSI escape codes, emoji, and wide characters.
*
* This is useful for:
* - Aligning text in a terminal
* - Quickly checking if a string contains ANSI escape codes
* - Measuring the width of a string in a terminal
*
* This API is designed to match the popular "string-width" package, so that
* existing code can be easily ported to Bun and vice versa.
*
* @returns The width of the string in columns
*
* ## Examples
* @example
* ```ts
* import { stringWidth } from "bun";
*
* console.log(stringWidth("abc")); // 3
* console.log(stringWidth("👩‍👩‍👧‍👦")); // 2
* console.log(stringWidth("\u001b[31mhello\u001b[39m")); // 5
* console.log(stringWidth("\u001b[31mhello\u001b[39m", { countAnsiEscapeCodes: false })); // 5
* console.log(stringWidth("\u001b[31mhello\u001b[39m", { countAnsiEscapeCodes: true })); // 13
* ```
*
*/
function stringWidth(
/**
* The string to measure
*/
input: string,
options?: {
/**
* If `true`, count ANSI escape codes as part of the string width. If `false`, ANSI escape codes are ignored when calculating the string width.
*
* @default false
*/
countAnsiEscapeCodes?: boolean;
/**
* When it's ambiguous and `true`, count emoji as 1 character wide. If `false`, emoji are counted as 2 characters wide.
*
* @default true
*/
ambiguousIsNarrow?: boolean;
},
): number;
export type ShellFunction = (input: Uint8Array) => Uint8Array;
export type ShellExpression =
| { toString(): string }
| Array<ShellExpression>
| string
| { raw: string }
| Subprocess
| SpawnOptions.Readable
| SpawnOptions.Writable
| ReadableStream;
class ShellError extends Error implements ShellOutput {
readonly stdout: Buffer;
readonly stderr: Buffer;
readonly exitCode: number;
/**
* Read from stdout as a string
*
* @param encoding - The encoding to use when decoding the output
* @returns Stdout as a string with the given encoding
* @example
*
* ## Read as UTF-8 string
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.text()); // "hello\n"
* ```
*
* ## Read as base64 string
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.text("base64")); // "aGVsbG8K"
* ```
*
*/
text(encoding?: BufferEncoding): string;
/**
* Read from stdout as a JSON object
*
* @returns Stdout as a JSON object
* @example
*
* ```ts
* const output = await $`echo '{"hello": 123}'`;
* console.log(output.json()); // { hello: 123 }
* ```
*
*/
json(): any;
/**
* Read from stdout as an ArrayBuffer
*
* @returns Stdout as an ArrayBuffer
* @example
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.arrayBuffer()); // ArrayBuffer { byteLength: 6 }
* ```
*/
arrayBuffer(): ArrayBuffer;
/**
* Read from stdout as a Blob
*
* @returns Stdout as a blob
* @example
* ```ts
* const output = await $`echo hello`;
* console.log(output.blob()); // Blob { size: 6, type: "" }
* ```
*/
blob(): Blob;
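/**
* Read from stdout as a Uint8Array
*
* @returns Stdout as a Uint8Array
*/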
bytes(): Uint8Array;
}
class ShellPromise extends Promise<ShellOutput> {
get stdin(): WritableStream;
/**
* Change the current working directory of the shell.
* @param newCwd - The new working directory
*/
cwd(newCwd: string): this;
/**
* Set environment variables for the shell.
* @param newEnv - The new environment variables
*
* @example
* ```ts
* const { stdout } = await $`echo $FOO`.env({ ...process.env, FOO: "LOL!" });
* expect(stdout.toString()).toBe("LOL!\n");
* ```
*/
env(newEnv: Record<string, string> | undefined): this;
/**
* By default, the shell will write to the current process's stdout and stderr, as well as buffering that output.
*
* This configures the shell to only buffer the output.
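*
* @example
* ```ts
* // Output is buffered but not echoed to the terminal
* const { stdout } = await $`echo hello`.quiet();
* console.log(stdout.toString()); // "hello\n"
* ```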
*/
quiet(): this;
/**
* Read from stdout as a string, line by line
*
* Automatically calls {@link quiet} to disable echoing to stdout.
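*
* @example
* A sketch assuming a file `foo.txt` exists:
* ```ts
* for await (const line of $`cat foo.txt`.lines()) {
*   console.log(line);
* }
* ```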
*/
lines(): AsyncIterable<string>;
/**
* Read from stdout as a string
*
* Automatically calls {@link quiet} to disable echoing to stdout.
* @param encoding - The encoding to use when decoding the output
* @returns A promise that resolves with stdout as a string
* @example
*
* ## Read as UTF-8 string
*
* ```ts
* const output = await $`echo hello`.text();
* console.log(output); // "hello\n"
* ```
*
* ## Read as base64 string
*
* ```ts
* const output = await $`echo hello`.text("base64");
* console.log(output); // "aGVsbG8K"
* ```
*
*/
text(encoding?: BufferEncoding): Promise<string>;
/**
* Read from stdout as a JSON object
*
* Automatically calls {@link quiet}
*
* @returns A promise that resolves with stdout as a JSON object
* @example
*
* ```ts
* const output = await $`echo '{"hello": 123}'`.json();
* console.log(output); // { hello: 123 }
* ```
*
*/
json(): Promise<any>;
/**
* Read from stdout as an ArrayBuffer
*
* Automatically calls {@link quiet}
* @returns A promise that resolves with stdout as an ArrayBuffer
* @example
*
* ```ts
* const output = await $`echo hello`.arrayBuffer();
* console.log(output); // ArrayBuffer { byteLength: 6 }
* ```
*/
arrayBuffer(): Promise<ArrayBuffer>;
/**
* Read from stdout as a Blob
*
* Automatically calls {@link quiet}
* @returns A promise that resolves with stdout as a Blob
* @example
* ```ts
* const output = await $`echo hello`.blob();
* console.log(output); // Blob { size: 6, type: "" }
* ```
*/
blob(): Promise<Blob>;
/**
* Configure the shell to not throw an exception on non-zero exit codes. Throwing can be re-enabled with `.throws(true)`.
*
* By default, the shell will throw an exception on commands which return non-zero exit codes.
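*
* @example
* ```ts
* const { exitCode } = await $`exit 1`.nothrow();
* console.log(exitCode); // 1
* ```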
*/
nothrow(): this;
/**
* Configure whether or not the shell should throw an exception on non-zero exit codes.
*
* By default, this is configured to `true`.
*/
throws(shouldThrow: boolean): this;
}
interface ShellConstructor {
new (): Shell;
}
export interface Shell {
(
strings: TemplateStringsArray,
...expressions: ShellExpression[]
): ShellPromise;
/**
* Perform bash-like brace expansion on the given pattern.
* @param pattern - Brace pattern to expand
*
* @example
* ```js
* const result = braces('index.{js,jsx,ts,tsx}');
* console.log(result) // ['index.js', 'index.jsx', 'index.ts', 'index.tsx']
* ```
*/
braces(pattern: string): string[];
/**
* Escape strings for input into shell commands.
* @param input
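*
* @example
* A sketch: values interpolated into the template literal are escaped
* automatically, so manual escaping is mainly for strings you assemble yourself:
* ```ts
* const untrusted = '"; rm -rf /';
* console.log($.escape(untrusted)); // a safely-quoted string (exact output illustrative)
* ```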
*/
escape(input: string): string;
/**
*
* Change the default environment variables for shells created by this instance.
*
* @param newEnv Default environment variables to use for shells created by this instance.
* @default process.env
*
* ## Example
*
* ```js
* import {$} from 'bun';
* $.env({ BUN: "bun" });
* await $`echo $BUN`;
* // "bun"
* ```
*/
env(newEnv?: Record<string, string | undefined>): this;
/**
*
* @param newCwd Default working directory to use for shells created by this instance.
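*
* @example
* ```js
* $.cwd("/tmp");
* await $`pwd`; // "/tmp"
* ```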
*/
cwd(newCwd?: string): this;
/**
* Configure the shell to not throw an exception on non-zero exit codes.
*/
nothrow(): this;
/**
* Configure whether or not the shell should throw an exception on non-zero exit codes.
*/
throws(shouldThrow: boolean): this;
readonly ShellPromise: typeof ShellPromise;
readonly Shell: ShellConstructor;
}
export interface ShellOutput {
readonly stdout: Buffer;
readonly stderr: Buffer;
readonly exitCode: number;
/**
* Read from stdout as a string
*
* @param encoding - The encoding to use when decoding the output
* @returns Stdout as a string with the given encoding
* @example
*
* ## Read as UTF-8 string
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.text()); // "hello\n"
* ```
*
* ## Read as base64 string
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.text("base64")); // "aGVsbG8K"
* ```
*
*/
text(encoding?: BufferEncoding): string;
/**
* Read from stdout as a JSON object
*
* @returns Stdout as a JSON object
* @example
*
* ```ts
* const output = await $`echo '{"hello": 123}'`;
* console.log(output.json()); // { hello: 123 }
* ```
*
*/
json(): any;
/**
* Read from stdout as an ArrayBuffer
*
* @returns Stdout as an ArrayBuffer
* @example
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.arrayBuffer()); // ArrayBuffer { byteLength: 6 }
* ```
*/
arrayBuffer(): ArrayBuffer;
/**
* Read from stdout as an Uint8Array
*
* @returns Stdout as an Uint8Array
* @example
*
* ```ts
* const output = await $`echo hello`;
* console.log(output.bytes()); // Uint8Array { byteLength: 6 }
* ```
*/
bytes(): Uint8Array;
/**
* Read from stdout as a Blob
*
* @returns Stdout as a blob
* @example
* ```ts
* const output = await $`echo hello`;
* console.log(output.blob()); // Blob { size: 6, type: "" }
* ```
*/
blob(): Blob;
}
export const $: Shell;
interface TOML {
/**
* Parse a TOML string into a JavaScript object.
*
* @param {string} input The TOML string to parse
* @returns A JavaScript object representing the parsed TOML document
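*
* @example
* ```ts
* const data = Bun.TOML.parse('name = "bun"');
* console.log(data); // { name: "bun" }
* ```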
*/
parse(input: string): object;
}
const TOML: TOML;
type Serve<WebSocketDataType = undefined> =
| ServeOptions
| TLSServeOptions
| UnixServeOptions
| UnixTLSServeOptions
| WebSocketServeOptions<WebSocketDataType>
| TLSWebSocketServeOptions<WebSocketDataType>
| UnixWebSocketServeOptions<WebSocketDataType>
| UnixTLSWebSocketServeOptions<WebSocketDataType>;
/**
* Start a fast HTTP server.
*
* @param options Server options (port defaults to $PORT || 3000)
*
* -----
*
* @example
*
* ```ts
* Bun.serve({
* fetch(req: Request): Response | Promise<Response> {
* return new Response("Hello World!");
* },
*
* // Optional port number - the default value is 3000
* port: process.env.PORT || 3000,
* });
* ```
* -----
*
* @example
*
* Send a file
*
* ```ts
* Bun.serve({
* fetch(req: Request): Response | Promise<Response> {
* return new Response(Bun.file("./package.json"));
* },
*
* // Optional port number - the default value is 3000
* port: process.env.PORT || 3000,
* });
* ```
*/
// eslint-disable-next-line @definitelytyped/no-unnecessary-generics
function serve<T>(options: Serve<T>): Server;
/**
* Synchronously resolve a `moduleId` as though it were imported from `parent`
*
* On failure, throws a `ResolveMessage`
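*
* @example
* ```ts
* // Resolve "./logger.ts" relative to the current file's directory
* const path = Bun.resolveSync("./logger.ts", import.meta.dir);
* ```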
*/
// tslint:disable-next-line:unified-signatures
function resolveSync(moduleId: string, parent: string): string;
/**
* Resolve a `moduleId` as though it were imported from `parent`
*
* On failure, throws a `ResolveMessage`
*
* For now, use the sync version. There is zero performance benefit to using this async version. It exists for future-proofing.
*/
// tslint:disable-next-line:unified-signatures
function resolve(moduleId: string, parent: string): Promise<string>;
/**
* Use the fastest syscalls available to copy from `input` into `destination`.
*
* If `destination` exists, it must be a regular file or symlink to a file. If `destination`'s directory does not exist, it will be created by default.
*
* @param destination The file or file path to write to
* @param input The data to copy into `destination`.
* @returns A promise that resolves with the number of bytes written.
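*
* @example
* ```ts
* await Bun.write("output.txt", "lorem ipsum"); // write a string to a path
* await Bun.write(Bun.stdout, Bun.file("input.txt")); // copy a file to stdout
* ```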
*/
// tslint:disable-next-line:unified-signatures
function write(
destination: BunFile | S3File | Bun.PathLike,
input: Blob | NodeJS.TypedArray | ArrayBufferLike | string | Bun.BlobPart[],
options?: {
/** If writing to a PathLike, set the permissions of the file. */
mode?: number;
/**
* If `true`, create the parent directory if it doesn't exist. By default, this is `true`.
*
* If `false`, this will throw an error if the directory doesn't exist.
*
* @default true
*/
createPath?: boolean;
},
): Promise<number>;
/**
* Persist a {@link Response} body to disk.
*
* @param destination The file to write to. If the file doesn't exist,
* it will be created and if the file does exist, it will be
* overwritten. If `input`'s size is less than `destination`'s size,
* `destination` will be truncated.
* @param input - `Response` object
* @returns A promise that resolves with the number of bytes written.
*/
function write(
destination: BunFile,
input: Response,
options?: {
/**
* If `true`, create the parent directory if it doesn't exist. By default, this is `true`.
*
* If `false`, this will throw an error if the directory doesn't exist.
*
* @default true
*/
createPath?: boolean;
},
): Promise<number>;
/**
* Persist a {@link Response} body to disk.
*
* @param destinationPath The file path to write to. If the file doesn't
* exist, it will be created and if the file does exist, it will be
* overwritten. If `input`'s size is less than `destination`'s size,
* `destination` will be truncated.
* @param input - `Response` object
* @returns A promise that resolves with the number of bytes written.
*/
// tslint:disable-next-line:unified-signatures
function write(
destinationPath: Bun.PathLike,
input: Response,
options?: {
/**
* If `true`, create the parent directory if it doesn't exist. By default, this is `true`.
*
* If `false`, this will throw an error if the directory doesn't exist.
*
* @default true
*/
createPath?: boolean;
},
): Promise<number>;
/**
* Use the fastest syscalls available to copy from `input` into `destination`.
*
* If `destination` exists, it must be a regular file or symlink to a file.
*
* On Linux, this uses `copy_file_range`.
*
* On macOS, when the destination doesn't already exist, this uses
* [`clonefile()`](https://www.manpagez.com/man/2/clonefile/) and falls
* back to [`fcopyfile()`](https://www.manpagez.com/man/2/fcopyfile/)
*
* @param destination The file to write to. If the file doesn't exist,
* it will be created and if the file does exist, it will be
* overwritten. If `input`'s size is less than `destination`'s size,
* `destination` will be truncated.
* @param input The file to copy from.
* @returns A promise that resolves with the number of bytes written.
*/
// tslint:disable-next-line:unified-signatures
function write(
destination: BunFile,
input: BunFile,
options?: {
/**
* If `true`, create the parent directory if it doesn't exist. By default, this is `true`.
*
* If `false`, this will throw an error if the directory doesn't exist.
*
* @default true
*/
createPath?: boolean;
},
): Promise<number>;
/**
* Use the fastest syscalls available to copy from `input` into `destination`.
*
* If `destination` exists, it must be a regular file or symlink to a file.
*
* On Linux, this uses `copy_file_range`.
*
* On macOS, when the destination doesn't already exist, this uses
* [`clonefile()`](https://www.manpagez.com/man/2/clonefile/) and falls
* back to [`fcopyfile()`](https://www.manpagez.com/man/2/fcopyfile/)
*
* @param destinationPath The file path to write to. If the file doesn't
* exist, it will be created and if the file does exist, it will be
* overwritten. If `input`'s size is less than `destination`'s size,
* `destination` will be truncated.
* @param input The file to copy from.
* @returns A promise that resolves with the number of bytes written.
*/
// tslint:disable-next-line:unified-signatures
function write(
destinationPath: Bun.PathLike,
input: BunFile,
options?: {
/**
* If `true`, create the parent directory if it doesn't exist. By default, this is `true`.
*
* If `false`, this will throw an error if the directory doesn't exist.
*
* @default true
*/
createPath?: boolean;
},
): Promise<number>;
interface SystemError extends Error {
errno?: number | undefined;
code?: string | undefined;
path?: string | undefined;
syscall?: string | undefined;
}
/**
* Concatenate an array of typed arrays into a single `ArrayBuffer`. This is a fast path.
*
* You can do this manually if you'd like, but this function will generally
* be a little faster.
*
* If you want a `Uint8Array` instead, consider `Buffer.concat`.
*
* @param buffers An array of typed arrays to concatenate.
* @returns An `ArrayBuffer` with the data from all the buffers.
*
* Here is similar code to do it manually, except about 30% slower:
* ```js
* const chunks = [...];
* let size = 0;
* for (const chunk of chunks) {
*   size += chunk.byteLength;
* }
* const buffer = new ArrayBuffer(size);
* const view = new Uint8Array(buffer);
* let offset = 0;
* for (const chunk of chunks) {
*   view.set(chunk, offset);
*   offset += chunk.byteLength;
* }
* return buffer;
* ```
*
* This function is faster because it uses uninitialized memory when copying. Since the entire
* length of the buffer is known, it is safe to use uninitialized memory.
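*
* @example
* ```ts
* const ab = Bun.concatArrayBuffers([new Uint8Array([1, 2]), new Uint8Array([3])]);
* console.log(ab.byteLength); // 3
* ```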
*/
function concatArrayBuffers(
buffers: Array<ArrayBufferView | ArrayBufferLike>,
maxLength?: number,
): ArrayBuffer;
function concatArrayBuffers(
buffers: Array<ArrayBufferView | ArrayBufferLike>,
maxLength: number,
asUint8Array: false,
): ArrayBuffer;
function concatArrayBuffers(
buffers: Array<ArrayBufferView | ArrayBufferLike>,
maxLength: number,
asUint8Array: true,
): Uint8Array;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* Concatenate the chunks into a single {@link ArrayBuffer}.
*
* Each chunk must be a TypedArray or an ArrayBuffer. If you need to support
* chunks of different types, consider {@link readableStreamToBlob}
*
* @param stream The stream to consume.
* @returns A promise that resolves with the concatenated chunks as an `ArrayBuffer`, or the `ArrayBuffer` itself if it can be produced synchronously.
*/
function readableStreamToArrayBuffer(
stream: ReadableStream<ArrayBufferView | ArrayBufferLike>,
): Promise<ArrayBuffer> | ArrayBuffer;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* Concatenate the chunks into a single {@link ArrayBuffer}.
*
* Each chunk must be a TypedArray or an ArrayBuffer. If you need to support
* chunks of different types, consider {@link readableStreamToBlob}
*
* @param stream The stream to consume.
* @returns A promise that resolves with the concatenated chunks as a {@link Uint8Array}, or the `Uint8Array` itself if it can be produced synchronously.
*/
function readableStreamToBytes(
stream: ReadableStream<ArrayBufferView | ArrayBufferLike>,
): Promise<Uint8Array> | Uint8Array;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* Concatenate the chunks into a single {@link Blob}.
*
* @param stream The stream to consume.
* @returns A promise that resolves with the concatenated chunks as a {@link Blob}.
*/
function readableStreamToBlob(stream: ReadableStream): Promise<Blob>;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* Reads the multi-part or URL-encoded form data into a {@link FormData} object
*
* @param stream The stream to consume.
* @param multipartBoundaryExcludingDashes Optional boundary to use for multipart form data. If none is provided, the stream is assumed to be URL-encoded form data.
* @returns A promise that resolves with the data encoded into a {@link FormData} object.
*
* ## Multipart form data example
*
* ```ts
* // without dashes
* const boundary = "WebKitFormBoundary" + Math.random().toString(16).slice(2);
*
* const myStream = getStreamFromSomewhere() // ...
* const formData = await Bun.readableStreamToFormData(stream, boundary);
* formData.get("foo"); // "bar"
* ```
* ## URL-encoded form data example
*
* ```ts
* const stream = new Response("hello=123").body;
* const formData = await Bun.readableStreamToFormData(stream);
* formData.get("hello"); // "123"
* ```
*/
function readableStreamToFormData(
stream: ReadableStream<string | NodeJS.TypedArray | ArrayBufferView>,
multipartBoundaryExcludingDashes?:
| string
| NodeJS.TypedArray
| ArrayBufferView,
): Promise<FormData>;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* Concatenate the chunks into a single string. Chunks must be a TypedArray or an ArrayBuffer. If you need to support chunks of different types, consider {@link readableStreamToBlob}.
*
* @param stream The stream to consume.
* @returns A promise that resolves with the concatenated chunks as a {@link String}.
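*
* @example
* ```ts
* const stream = new Response("hello world").body!;
* const text = await Bun.readableStreamToText(stream); // "hello world"
* ```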
*/
function readableStreamToText(stream: ReadableStream): Promise<string>;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* Concatenate the chunks into a single string and parse as JSON. Chunks must be a TypedArray or an ArrayBuffer. If you need to support chunks of different types, consider {@link readableStreamToBlob}.
*
* @param stream The stream to consume.
* @returns A promise that resolves with the parsed JSON value.
*/
function readableStreamToJSON(stream: ReadableStream): Promise<any>;
/**
* Consume all data from a {@link ReadableStream} until it closes or errors.
*
* @param stream The stream to consume
* @returns A promise that resolves with the chunks as an array
*/
function readableStreamToArray<T>(
stream: ReadableStream<T>,
): Promise<T[]> | T[];
/**
* Escape the following characters in a string:
*
* - `"` becomes `"""`
* - `&` becomes `"&"`
* - `'` becomes `"'"`
* - `<` becomes `"<"`
* - `>` becomes `">"`
*
* This function is optimized for large input. On an M1X, it processes 480 MB/s to
* 20 GB/s, depending on how much data is being escaped and whether there is non-ASCII
* text.
*
* Non-string types will be converted to a string before escaping.
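*
* @example
* ```ts
* Bun.escapeHTML("<script>alert('hi')</script>");
* // "&lt;script&gt;alert(&#x27;hi&#x27;)&lt;/script&gt;"
* ```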
*/
function escapeHTML(input: string | object | number | boolean): string;
/**
* Convert a filesystem path to a file:// URL.
*
* @param path The path to convert.
* @returns A {@link URL} with the file:// scheme.
*
* @example
* ```js
* const url = Bun.pathToFileURL("/foo/bar.txt");
* console.log(url.href); // "file:///foo/bar.txt"
* ```
*
* Internally, this function uses WebKit's URL API to
* convert the path to a file:// URL.
*/
function pathToFileURL(path: string): URL;
interface Peek {
<T = undefined>(promise: T | Promise<T>): Promise<T> | T;
status<T = undefined>(
promise: T | Promise<T>,
): "pending" | "fulfilled" | "rejected";
}
/**
* Extract the value from the Promise in the same tick of the event loop
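*
* @example
* ```ts
* const promise = Promise.resolve(42);
* Bun.peek(promise); // 42 (the resolved value, without awaiting)
* Bun.peek.status(promise); // "fulfilled"
* ```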
*/
const peek: Peek;
/**
* Convert a {@link URL} to a filesystem path.
* @param url The URL to convert.
* @returns A filesystem path.
* @throws If the URL is not a URL.
* @example
* ```js
* const path = Bun.fileURLToPath(new URL("file:///foo/bar.txt"));
* console.log(path); // "/foo/bar.txt"
* ```
*/
function fileURLToPath(url: URL | string): string;
/**
* Fast incremental writer that becomes an `ArrayBuffer` on end().
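*
* @example
* ```ts
* const sink = new Bun.ArrayBufferSink();
* sink.start({ highWaterMark: 1024 });
* sink.write("hello");
* sink.write(" world");
* const buffer = sink.end(); // ArrayBuffer containing "hello world"
* ```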
*/
class ArrayBufferSink {
constructor();
start(options?: {
asUint8Array?: boolean;
/**
* Preallocate an internal buffer of this size
* This can significantly improve performance when the chunk size is small
*/
highWaterMark?: number;
/**
* On {@link ArrayBufferSink.flush}, return the written data as a `Uint8Array`.
* Writes will restart from the beginning of the buffer.
*/
stream?: boolean;
}): void;
write(
chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer,
): number;
/**
* Flush the internal buffer
*
* If {@link ArrayBufferSink.start} was passed a `stream` option, this will return an `ArrayBuffer`.
* If {@link ArrayBufferSink.start} was passed a `stream` option and `asUint8Array`, this will return a `Uint8Array`.
* Otherwise, this will return the number of bytes written since the last flush.
*
* This API might change later to separate Uint8ArraySink and ArrayBufferSink
*/
flush(): number | Uint8Array | ArrayBuffer;
end(): ArrayBuffer | Uint8Array;
}
const dns: {
/**
* Lookup the IP address for a hostname
*
* Uses non-blocking APIs by default
*
* @param hostname The hostname to lookup
* @param options Options for the lookup
*
* ## Example
*
* ```js
* const [{ address }] = await Bun.dns.lookup('example.com');
* ```
*
* ### Filter results to IPv4:
*
* ```js
* import { dns } from 'bun';
* const [{ address }] = await dns.lookup('example.com', {family: 4});
* console.log(address); // "123.122.22.126"
* ```
*
* ### Filter results to IPv6:
*
* ```js
* import { dns } from 'bun';
* const [{ address }] = await dns.lookup('example.com', {family: 6});
* console.log(address); // "2001:db8::1"
* ```
*
* #### DNS resolver client
*
* Bun supports three DNS resolvers:
* - `c-ares` - Uses the c-ares library to perform DNS resolution. This is the default on Linux.
* - `system` - Uses the system's non-blocking DNS resolver API if available, falls back to `getaddrinfo`. This is the default on macOS and the same as `getaddrinfo` on Linux.
* - `getaddrinfo` - Uses the posix standard `getaddrinfo` function. Will cause performance issues under concurrent loads.
*
* To customize the DNS resolver, pass a `backend` option to `dns.lookup`:
* ```js
* import { dns } from 'bun';
* const [{ address }] = await dns.lookup('example.com', {backend: 'getaddrinfo'});
* console.log(address); // "19.42.52.62"
* ```
*/
lookup(
hostname: string,
options?: {
/**
* Limit results to either IPv4, IPv6, or both
*/
family?: 4 | 6 | 0 | "IPv4" | "IPv6" | "any";
/**
* Limit results to either UDP or TCP
*/
socketType?: "udp" | "tcp";
flags?: number;
port?: number;
/**
* The DNS resolver implementation to use
*
* Defaults to `"c-ares"` on Linux and `"system"` on macOS. This default
* may change in a future version of Bun if c-ares is not reliable
* enough.
*
* On macOS, `system` uses the builtin macOS [non-blocking DNS
* resolution
* API](https://opensource.apple.com/source/Libinfo/Libinfo-222.1/lookup.subproj/netdb_async.h.auto.html).
*
* On Linux, `system` is the same as `getaddrinfo`.
*
* `c-ares` is more performant on Linux in some high concurrency
* situations, but it lacks support for mDNS (`*.local`,
* `*.localhost` domains) along with some other advanced features. If
* you run into issues using `c-ares`, you should try `system`. If the
* hostname ends with `.local` or `.localhost`, Bun will automatically
* use `system` instead of `c-ares`.
*
* [`getaddrinfo`](https://man7.org/linux/man-pages/man3/getaddrinfo.3.html)
* is the POSIX standard function for blocking DNS resolution. Bun runs
* it in Bun's thread pool, which is limited to `cpus / 2`. That means
* if you run a lot of concurrent DNS lookups, concurrent IO will
* potentially pause until the DNS lookups are done.
*
* On macOS, it shouldn't be necessary to use "`getaddrinfo`" because
* `"system"` uses the same API underneath (except non-blocking).
*
* On Windows, libuv's non-blocking DNS resolver is used by default, and
* when specifying backends "system", "libc", or "getaddrinfo". The c-ares
* backend isn't currently supported on Windows.
*/
backend?: "libc" | "c-ares" | "system" | "getaddrinfo";
},
): Promise<DNSLookup[]>;
/**
*
* **Experimental API**
*
* Prefetch a hostname.
*
* This will be used by fetch() and Bun.connect() to avoid DNS lookups.
*
* @param hostname The hostname to prefetch
*
* @example
* ```js
* import { dns } from 'bun';
* dns.prefetch('example.com');
* // ... something expensive
* await fetch('https://example.com');
* ```
*/
prefetch(hostname: string): void;
/**
* **Experimental API**
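*
* @example
* A sketch; the printed values depend on prior lookups:
* ```js
* const stats = Bun.dns.getCacheStats();
* console.log(stats.cacheHitsCompleted, stats.cacheMisses, stats.size);
* ```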
*/
getCacheStats(): {
/**
* The number of times a cached DNS entry that was already resolved was used.
*/
cacheHitsCompleted: number;
cacheHitsInflight: number;
cacheMisses: number;
size: number;
errors: number;
totalCount: number;
};
ADDRCONFIG: number;
ALL: number;
V4MAPPED: number;
};
interface DNSLookup {
/**
* The IP address of the host as a string in IPv4 or IPv6 format.
*
* @example "127.0.0.1"
* @example "192.168.0.1"
* @example "2001:4860:4860::8888"
*/
address: string;
family: 4 | 6;
/**
* Time to live in seconds
*
* Only supported when using the `c-ares` DNS resolver via "backend" option
* to {@link dns.lookup}. Otherwise, it's 0.
*/
ttl: number;
}
/**
* Fast incremental writer for files and pipes.
*
* This uses the same interface as {@link ArrayBufferSink}, but writes to a file or pipe.
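*
* @example
* ```ts
* const writer = Bun.file("out.log").writer();
* writer.write("line 1\n");
* writer.write("line 2\n");
* await writer.end();
* ```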
*/
interface FileSink {
/**
* Write a chunk of data to the file.
*
* If the file descriptor is not writable yet, the data is buffered.
*/
write(
chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer,
): number;
/**
* Flush the internal buffer, committing the data to disk or the pipe.
*/
flush(): number | Promise<number>;
/**
* Close the file descriptor. This also flushes the internal buffer.
*/
end(error?: Error): number | Promise<number>;
start(options?: {
/**
* Preallocate an internal buffer of this size
* This can significantly improve performance when the chunk size is small
*/
highWaterMark?: number;
}): void;
/**
* For FIFOs & pipes, this lets you decide whether Bun's process should
* remain alive until the pipe is closed.
*
* By default, it is automatically managed. While the stream is open, the
* process remains alive and once the other end hangs up or the stream
* closes, the process exits.
*
* If you previously called {@link unref}, you can call this again to re-enable automatic management.
*
* Internally, it will reference count the number of times this is called. By default, that number is 1.
*
* If the file is not a FIFO or pipe, {@link ref} and {@link unref} do
* nothing. If the pipe is already closed, this does nothing.
*/
ref(): void;
/**
* For FIFOs & pipes, this lets you decide whether Bun's process should
* remain alive until the pipe is closed.
*
* If you want to allow Bun's process to terminate while the stream is open,
* call this.
*
* If the file is not a FIFO or pipe, {@link ref} and {@link unref} do
* nothing. If the pipe is already closed, this does nothing.
*/
unref(): void;
}
interface FileBlob extends BunFile {}
/**
* [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob) powered by the fastest system calls available for operating on files.
*
* This Blob is lazy. That means it won't do any work until you read from it.
*
* - `size` will not be valid until the contents of the file are read at least once.
* - `type` is auto-set based on the file extension when possible
*
* @example
* ```js
* const file = Bun.file("./hello.json");
* console.log(file.type); // "application/json"
* console.log(await file.text()); // '{"hello":"world"}'
* ```
*
* @example
* ```js
* await Bun.write(
* Bun.file("./hello.txt"),
* "Hello, world!"
* );
* ```
*/
interface BunFile extends Blob {
/**
* Offset any operation on the file starting at `begin` and ending at `end`. `end` is relative to 0
*
* Similar to [`TypedArray.subarray`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/subarray). Does not copy the file, open the file, or modify the file.
*
* If `begin` > 0, {@link Bun.write()} will be slower on macOS
*
* @param begin - start offset in bytes
* @param end - absolute offset in bytes (relative to 0)
* @param contentType - MIME type for the new BunFile
*/
slice(begin?: number, end?: number, contentType?: string): BunFile;
/**
* Offset any operation on the file starting at `begin`
*
* Similar to [`TypedArray.subarray`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/subarray). Does not copy the file, open the file, or modify the file.
*
* If `begin` > 0, {@link Bun.write()} will be slower on macOS
*
* @param begin - start offset in bytes
* @param contentType - MIME type for the new BunFile
*/
slice(begin?: number, contentType?: string): BunFile;
/**
* @param contentType - MIME type for the new BunFile
*/
slice(contentType?: string): BunFile;
/**
* Incremental writer for files and pipes.
*/
writer(options?: { highWaterMark?: number }): FileSink;
readonly readable: ReadableStream;
// TODO: writable: WritableStream;
/**
* A UNIX timestamp indicating when the file was last modified.
*/
lastModified: number;
/**
* The name or path of the file, as specified in the constructor.
*/
readonly name?: string;
/**
* Does the file exist?
*
* This returns true for regular files and FIFOs. It returns false for
* directories. Note that a race condition can occur where the file is
* deleted or renamed after this is called but before you open it.
*
* This does a system call to check if the file exists, which can be
* slow.
*
* If using this in an HTTP server, it's faster to instead use `return new
* Response(Bun.file(path))` and then an `error` handler to handle
* exceptions.
*
* Instead of checking for a file's existence and then performing the
* operation, it is faster to just perform the operation and handle the
* error.
*
* For an empty Blob, this always returns true.
*/
exists(): Promise<boolean>;
/**
* Write data to the file. This is equivalent to using {@link Bun.write} with a {@link BunFile}.
* @param data - The data to write.
* @param options - The options to use for the write.
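*
* @example
* ```ts
* await Bun.file("./out.txt").write("Hello, world!");
* ```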
*/
write(
data:
| string
| ArrayBufferView
| ArrayBuffer
| SharedArrayBuffer
| Request
| Response
| BunFile,
options?: { highWaterMark?: number },
): Promise<number>;
/**
* Deletes the file.
*/
unlink(): Promise<void>;
/**
* Deletes the file (same as {@link unlink}).
*/
delete(): Promise<void>;
/**
* Provides useful information about the file.
*/
stat(): Promise<Stats>;
}
interface NetworkSink extends FileSink {
/**
* Write a chunk of data to the network.
*
* If the network is not writable yet, the data is buffered.
*/
write(
chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer,
): number;
/**
* Flush the internal buffer, committing the data to the network.
*/
flush(): number | Promise<number>;
/**
* Finish the upload. This also flushes the internal buffer.
*/
end(error?: Error): number | Promise<number>;
/**
* Get the stat of the file.
*/
stat(): Promise<Stats>;
}
var S3Client: S3Client;
var s3: S3Client;
/**
* Configuration options for S3 operations
*/
interface S3Options extends BlobPropertyBag {
/**
* The Access Control List (ACL) policy for the file.
* Controls who can access the file and what permissions they have.
*
* @example
* // Setting public read access
* const file = s3("public-file.txt", {
* acl: "public-read",
* bucket: "my-bucket"
* });
*
* @example
* // Using with presigned URLs
* const url = file.presign({
* acl: "public-read",
* expiresIn: 3600
* });
*/
acl?:
| "private"
| "public-read"
| "public-read-write"
| "aws-exec-read"
| "authenticated-read"
| "bucket-owner-read"
| "bucket-owner-full-control"
| "log-delivery-write";
/**
* The S3 bucket name. Can be set via `S3_BUCKET` or `AWS_BUCKET` environment variables.
*
* @example
* // Using explicit bucket
* const file = s3("my-file.txt", { bucket: "my-bucket" });
*
* @example
* // Using environment variables
* // With S3_BUCKET=my-bucket in .env
* const file = s3("my-file.txt");
*/
bucket?: string;
/**
* The AWS region. Can be set via `S3_REGION` or `AWS_REGION` environment variables.
*
* @example
* const file = s3("my-file.txt", {
* bucket: "my-bucket",
* region: "us-west-2"
* });
*/
region?: string;
/**
* The access key ID for authentication.
* Can be set via `S3_ACCESS_KEY_ID` or `AWS_ACCESS_KEY_ID` environment variables.
*/
accessKeyId?: string;
/**
* The secret access key for authentication.
* Can be set via `S3_SECRET_ACCESS_KEY` or `AWS_SECRET_ACCESS_KEY` environment variables.
*/
secretAccessKey?: string;
/**
* Optional session token for temporary credentials.
* Can be set via `S3_SESSION_TOKEN` or `AWS_SESSION_TOKEN` environment variables.
*
* @example
* // Using temporary credentials
* const file = s3("my-file.txt", {
* accessKeyId: tempAccessKey,
* secretAccessKey: tempSecretKey,
* sessionToken: tempSessionToken
* });
*/
sessionToken?: string;
/**
* The S3-compatible service endpoint URL.
* Can be set via `S3_ENDPOINT` or `AWS_ENDPOINT` environment variables.
*
* @example
* // AWS S3
* const file = s3("my-file.txt", {
* endpoint: "https://s3.us-east-1.amazonaws.com"
* });
*
* @example
* // Cloudflare R2
* const file = s3("my-file.txt", {
* endpoint: "https://<account-id>.r2.cloudflarestorage.com"
* });
*
* @example
* // DigitalOcean Spaces
* const file = s3("my-file.txt", {
* endpoint: "https://<region>.digitaloceanspaces.com"
* });
*
* @example
* // MinIO (local development)
* const file = s3("my-file.txt", {
* endpoint: "http://localhost:9000"
* });
*/
endpoint?: string;
/**
* The size of each part in multipart uploads (in bytes).
* - Minimum: 5 MiB
* - Maximum: 5120 MiB
* - Default: 5 MiB
*
* @example
* // Configuring multipart uploads
* const file = s3("large-file.dat", {
* partSize: 10 * 1024 * 1024, // 10 MiB parts
* queueSize: 4 // Upload 4 parts in parallel
* });
*
* const writer = file.writer();
* // ... write large file in chunks
*/
partSize?: number;
/**
* Number of parts to upload in parallel for multipart uploads.
* - Default: 5
* - Maximum: 255
*
* Increasing this value can improve upload speeds for large files
* but will use more memory.
*/
queueSize?: number;
/**
* Number of retry attempts for failed uploads.
* - Default: 3
* - Maximum: 255
*
* @example
* // Setting retry attempts
* const file = s3("my-file.txt", {
* retry: 5 // Retry failed uploads up to 5 times
* });
*/
retry?: number;
/**
* The Content-Type of the file.
* Automatically set based on file extension when possible.
*
* @example
* // Setting explicit content type
* const file = s3("data.bin", {
* type: "application/octet-stream"
* });
*/
type?: string;
/**
* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects.
*
* @example
* // Setting explicit Storage class
* const file = s3("my-file.json", {
* storageClass: "STANDARD_IA"
* });
*/
storageClass?:
| "STANDARD"
| "DEEP_ARCHIVE"
| "EXPRESS_ONEZONE"
| "GLACIER"
| "GLACIER_IR"
| "INTELLIGENT_TIERING"
| "ONEZONE_IA"
| "OUTPOSTS"
| "REDUCED_REDUNDANCY"
| "SNOW"
| "STANDARD_IA";
/**
* @deprecated The size of the internal buffer in bytes. Defaults to 5 MiB. Use `partSize` and `queueSize` instead.
*/
highWaterMark?: number;
}
/**
* Options for generating presigned URLs
*/
interface S3FilePresignOptions extends S3Options {
/**
* Number of seconds until the presigned URL expires.
* - Default: 86400 (1 day)
*
* @example
* // Short-lived URL
* const url = file.presign({
* expiresIn: 3600 // 1 hour
* });
*
* @example
* // Long-lived public URL
* const url = file.presign({
* expiresIn: 7 * 24 * 60 * 60, // 7 days
* acl: "public-read"
* });
*/
expiresIn?: number;
/**
* The HTTP method allowed for the presigned URL.
*
* @example
* // GET URL for downloads
* const downloadUrl = file.presign({
* method: "GET",
* expiresIn: 3600
* });
*
* @example
* // PUT URL for uploads
* const uploadUrl = file.presign({
* method: "PUT",
* expiresIn: 3600,
* type: "application/json"
* });
*/
method?: "GET" | "POST" | "PUT" | "DELETE" | "HEAD";
}
interface S3Stats {
size: number;
lastModified: Date;
etag: string;
type: string;
}
/**
* Represents a file in an S3-compatible storage service.
* Extends the Blob interface for compatibility with web APIs.
*/
interface S3File extends Blob {
/**
* The size of the file in bytes.
* This is a Promise because it requires a network request to determine the size.
*
* @example
* // Getting file size
* const size = await file.size;
* console.log(`File size: ${size} bytes`);
*
* @example
* // Check if file is larger than 1MB
* if (await file.size > 1024 * 1024) {
* console.log("Large file detected");
* }
*/
/**
* TODO: figure out how to get the typescript types to not error for this property.
*/
// size: Promise<number>;
/**
* Creates a new S3File representing a slice of the original file.
* Uses HTTP Range headers for efficient partial downloads.
*
* @param begin - Starting byte offset
* @param end - Ending byte offset (exclusive)
* @param contentType - Optional MIME type for the slice
* @returns A new S3File representing the specified range
*
* @example
* // Reading file header
* const header = file.slice(0, 1024);
* const headerText = await header.text();
*
* @example
* // Reading with content type
* const jsonSlice = file.slice(1024, 2048, "application/json");
* const data = await jsonSlice.json();
*
* @example
* // Reading from offset to end
* const remainder = file.slice(1024);
* const content = await remainder.text();
*/
slice(begin?: number, end?: number, contentType?: string): S3File;
slice(begin?: number, contentType?: string): S3File;
slice(contentType?: string): S3File;
/**
* Creates a writable stream for uploading data.
* Suitable for large files as it uses multipart upload.
*
* @param options - Configuration for the upload
* @returns A NetworkSink for writing data
*
* @example
* // Basic streaming write
* const writer = file.writer({
* type: "application/json"
* });
* writer.write('{"hello": ');
* writer.write('"world"}');
* await writer.end();
*
* @example
* // Optimized large file upload
* const writer = file.writer({
* partSize: 10 * 1024 * 1024, // 10MB parts
* queueSize: 4, // Upload 4 parts in parallel
* retry: 3 // Retry failed parts
* });
*
* // Write large chunks of data efficiently
* for (const chunk of largeDataChunks) {
* writer.write(chunk);
* }
* await writer.end();
*
* @example
* // Error handling
* const writer = file.writer();
* try {
* writer.write(data);
* await writer.end();
* } catch (err) {
* console.error('Upload failed:', err);
* // Writer will automatically abort multipart upload on error
* }
*/
writer(options?: S3Options): NetworkSink;
/**
* Gets a readable stream of the file's content.
* Useful for processing large files without loading them entirely into memory.
*
* @returns A ReadableStream for the file content
*
* @example
* // Basic streaming read
* const stream = file.stream();
* for await (const chunk of stream) {
* console.log('Received chunk:', chunk);
* }
*
* @example
* // Piping to response
* const stream = file.stream();
* return new Response(stream, {
* headers: { 'Content-Type': file.type }
* });
*
* @example
* // Processing large files
* const stream = file.stream();
* const textDecoder = new TextDecoder();
* for await (const chunk of stream) {
* const text = textDecoder.decode(chunk);
* // Process text chunk by chunk
* }
*/
readonly readable: ReadableStream;
stream(): ReadableStream;
/**
* The name or path of the file in the bucket.
*
* @example
* const file = s3("folder/image.jpg");
* console.log(file.name); // "folder/image.jpg"
*/
readonly name?: string;
/**
* The bucket name containing the file.
*
* @example
* const file = s3("s3://my-bucket/file.txt");
* console.log(file.bucket); // "my-bucket"
*/
readonly bucket?: string;
/**
* Checks if the file exists in S3.
* Uses HTTP HEAD request to efficiently check existence without downloading.
*
* @returns Promise resolving to true if file exists, false otherwise
*
* @example
* // Basic existence check
* if (await file.exists()) {
* console.log("File exists in S3");
* }
*
* @example
* // With error handling
* try {
* const exists = await file.exists();
* if (!exists) {
* console.log("File not found");
* }
* } catch (err) {
* console.error("Error checking file:", err);
* }
*/
exists(): Promise<boolean>;
/**
* Uploads data to S3.
* Supports various input types and automatically handles large files.
*
* @param data - The data to upload
* @param options - Upload configuration options
* @returns Promise resolving to number of bytes written
*
* @example
* // Writing string data
* await file.write("Hello World", {
* type: "text/plain"
* });
*
* @example
* // Writing JSON
* const data = { hello: "world" };
* await file.write(JSON.stringify(data), {
* type: "application/json"
* });
*
* @example
* // Writing from Response
* const response = await fetch("https://example.com/data");
* await file.write(response);
*
* @example
* // Writing with ACL
* await file.write(data, {
* acl: "public-read",
* type: "application/octet-stream"
* });
*/
write(
data:
| string
| ArrayBufferView
| ArrayBuffer
| SharedArrayBuffer
| Request
| Response
| BunFile
| S3File
| Blob,
options?: S3Options,
): Promise<number>;
/**
* Generates a