/* @number0/iroh: a toolkit for building distributed applications */
/* auto-generated by NAPI-RS */
/* eslint-disable */
/**
* Author key to insert entries in a document
*
* Internally, an author is a `SigningKey` which is used to sign entries.
*/
export declare class Author {
/** Get an [`Author`] from a String */
static fromString(str: string): Author
/** Get the [`AuthorId`] of this Author */
id(): AuthorId
toString(): string
}
/** Identifier for an [`Author`] */
export declare class AuthorId {
/** Get an [`AuthorId`] from a String. */
static fromString(str: string): AuthorId
/** Returns true when both AuthorIds have the same value */
isEqual(other: AuthorId): boolean
toString(): string
}
/** Iroh authors client. */
export declare class Authors {
/**
* Returns the default document author of this node.
*
* On persistent nodes, the author is created on first start and its public key is saved
* in the data directory.
*
* The default author can be set with [`Self::set_default`].
*/
default(): Promise<AuthorId>
/** List all the AuthorIds that exist on this node. */
list(): Promise<Array<AuthorId>>
/**
* Create a new document author.
*
* You likely want to save the returned [`AuthorId`] somewhere so that you can use this author
* again.
*
* If you need only a single author, use [`Self::default`].
*/
create(): Promise<AuthorId>
/**
* Export the given author.
*
* Warning: This contains sensitive data.
*/
export(author: AuthorId): Promise<Author>
/**
* Import the given author.
*
* Warning: This contains sensitive data.
*/
import(author: Author): Promise<AuthorId>
/**
* Deletes the given author by id.
*
* Warning: This permanently removes this author.
*/
delete(author: AuthorId): Promise<void>
}
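// --- Example (editor's sketch, consumer-side usage; not part of the generated
// declarations): working with authors. Assumes docs are enabled, since authors
// belong to the docs engine; all calls mirror the declarations above.
import { Iroh } from '@number0/iroh'

async function authorsExample(): Promise<void> {
  const node = await Iroh.memory({ enableDocs: true })
  // Every node has a default author for document edits.
  const def = await node.authors.default()
  // Create an extra author; keep the returned id if you want to reuse it.
  const extra = await node.authors.create()
  const all = await node.authors.list()
  console.log('default:', def.toString(), 'created:', extra.toString(), 'total:', all.length)
}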
/** A bidirectional stream, made up of a send and a receive half. */
export declare class BiStream {
get send(): SendStream
get recv(): RecvStream
}
/** Options to download data specified by the hash. */
export declare class BlobDownloadOptions {
/** Create a `BlobDownloadOptions` */
constructor(format: BlobFormat, nodes: Array<NodeAddr>, tag: SetTagOption)
}
/** Iroh blobs client. */
export declare class Blobs {
/**
* List all complete blobs.
*
* Note: this allocates for each `BlobListResponse`; if you have many `BlobListResponse`s this may be a prohibitively large list.
* Please file an [issue](https://github.com/n0-computer/iroh-ffi/issues/new) if you run into this.
*/
list(): Promise<Array<Hash>>
/**
* Get the size information on a single blob.
*
* This method only exists in the FFI bindings.
*/
size(hash: string): Promise<bigint>
/**
* Check if a blob is completely stored on the node.
*
* This is just a convenience wrapper around `status` that returns a boolean.
*/
has(hash: string): Promise<boolean>
/** Check the storage status of a blob on this node. */
status(hash: string): Promise<BlobStatus>
/**
* Read all bytes of single blob.
*
* This allocates a buffer for the full blob. Use it only if you know that the blob you're
* reading is small. If unsure, check the size with [`Self::blobs_size`]
* before calling [`Self::blobs_read_to_bytes`].
*/
readToBytes(hash: string): Promise<Array<number>>
/**
* Read all bytes of single blob at `offset` for length `len`.
*
* This allocates a buffer of the full length `len`. Use it only if you know that the data
* you're reading is small. If unsure, check the size with [`Self::blobs_size`]
* before calling [`Self::blobs_read_at_to_bytes`].
*/
readAtToBytes(hash: string, offset: bigint, len: ReadAtLen): Promise<Array<number>>
/**
* Import a blob from a filesystem path.
*
* `path` should be an absolute path valid for the file system on which
* the node runs.
* If `in_place` is true, Iroh will assume that the data will not change and will share it in
* place without copying to the Iroh data directory.
*/
addFromPath(path: string, inPlace: boolean, tag: SetTagOption, wrap: WrapOption, cb: ((err: Error | null, arg: AddProgress) => void)): Promise<void>
/**
* Export the blob contents to a file path.
* The `path` field is expected to be an absolute path.
*/
writeToPath(hash: string, path: string): Promise<void>
/** Write a blob by passing bytes. */
addBytes(bytes: Array<number>): Promise<BlobAddOutcome>
/** Write a blob by passing bytes, setting an explicit tag name. */
addBytesNamed(bytes: Array<number>, name: string): Promise<BlobAddOutcome>
/** Download a blob from another node and add it to the local database. */
download(hash: string, opts: BlobDownloadOptions, cb: ((err: Error | null, arg: DownloadProgress) => void)): Promise<void>
/**
* Export a blob from the internal blob store to a path on the node's filesystem.
*
* `destination` should be a writeable, absolute path on the local node's filesystem.
*
* If `format` is set to [`ExportFormat::Collection`], and the `hash` refers to a collection,
* all children of the collection will be exported. See [`ExportFormat`] for details.
*
* The `mode` argument defines if the blob should be copied to the target location or moved out of
* the internal store into the target location. See [`ExportMode`] for details.
*/
export(hash: string, destination: string, format: BlobExportFormat, mode: BlobExportMode): Promise<void>
/** Create a ticket for sharing a blob from this node. */
share(hash: string, blobFormat: BlobFormat, ticketOptions: AddrInfoOptions): Promise<BlobTicket>
/**
* List all incomplete (partial) blobs.
*
* Note: this allocates for each `BlobListIncompleteResponse`; if you have many `BlobListIncompleteResponse`s this may be a prohibitively large list.
* Please file an [issue](https://github.com/n0-computer/iroh-ffi/issues/new) if you run into this.
*/
listIncomplete(): Promise<Array<IncompleteBlobInfo>>
/**
* List all collections.
*
* Note: this allocates for each `BlobListCollectionsResponse`; if you have many `BlobListCollectionsResponse`s this may be a prohibitively large list.
* Please file an [issue](https://github.com/n0-computer/iroh-ffi/issues/new) if you run into this.
*/
listCollections(): Promise<Array<CollectionInfo>>
/** Read the content of a collection */
getCollection(hash: string): Promise<Collection>
/**
* Create a collection from already existing blobs.
*
* To automatically clear the tags of the blobs you pass in, list those tags in
* `tags_to_delete`; they will be deleted once the collection is created.
*/
createCollection(collection: Collection, tag: SetTagOption, tagsToDelete: Array<string>): Promise<HashAndTag>
/** Delete a blob. */
deleteBlob(hash: string): Promise<void>
}
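// --- Example (editor's sketch): storing bytes as a blob and reading them back.
// Note that `addBytes` and `readToBytes` work with plain number arrays.
import { Iroh } from '@number0/iroh'

async function blobsExample(): Promise<void> {
  const node = await Iroh.memory()
  const bytes = Array.from(new TextEncoder().encode('hello blob'))
  const outcome = await node.blobs.addBytes(bytes)
  console.log('stored', outcome.hash, 'size', outcome.size)
  // Reading everything into memory is fine here because the blob is small.
  const read = await node.blobs.readToBytes(outcome.hash)
  console.log(new TextDecoder().decode(Uint8Array.from(read)))
  await node.node.shutdown()
}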
/**
* A token containing everything to get a file from the provider.
*
* It is a single item which can be easily serialized and deserialized.
*/
export declare class BlobTicket {
/** The format of the blob. */
readonly format: BlobFormat
/** The hash to retrieve. */
readonly hash: string
constructor(nodeAddr: NodeAddr, hash: string, format: BlobFormat)
static fromString(str: string): BlobTicket
get nodeAddr(): NodeAddr
/** Checks if the two tickets are equal */
isEqual(other: BlobTicket): boolean
toString(): string
/** True if the ticket is for a collection and should retrieve all blobs in it. */
recursive(): boolean
/** Convert this ticket into input parameters for a call to blobs_download */
asDownloadOptions(): BlobDownloadOptions
}
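// --- Example (editor's sketch): sharing a blob via a ticket and downloading it
// on a second node with `asDownloadOptions`. The two in-memory nodes stand in
// for two machines; the ticket string travels out of band (chat, QR code, ...).
import { Iroh, BlobTicket, BlobFormat, AddrInfoOptions } from '@number0/iroh'

async function ticketExample(): Promise<void> {
  const alice = await Iroh.memory()
  const bob = await Iroh.memory()
  const added = await alice.blobs.addBytes(Array.from(new TextEncoder().encode('shared')))
  const ticket = await alice.blobs.share(added.hash, BlobFormat.Raw, AddrInfoOptions.RelayAndAddresses)
  const parsed = BlobTicket.fromString(ticket.toString())
  await bob.blobs.download(parsed.hash, parsed.asDownloadOptions(), (_err, progress) => {
    if (progress?.allDone) console.log('download finished')
  })
}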
/** A collection of blobs */
export declare class Collection {
/** Create a new empty collection */
constructor()
/** Add the given blob to the collection */
push(name: string, hash: string): void
/** Check if the collection is empty */
isEmpty(): boolean
/** Get the names of the blobs in this collection */
names(): Array<string>
/** Get the links to the blobs in this collection */
links(): Array<string>
/** Get the blobs associated with this collection */
blobs(): Array<LinkAndName>
/** Returns the number of blobs in this collection */
length(): bigint
}
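// --- Example (editor's sketch): grouping previously added blobs into a named
// collection. There are no per-blob tags to clean up, so `tagsToDelete` is empty.
import { Iroh, Collection, SetTagOption } from '@number0/iroh'

async function collectionExample(): Promise<void> {
  const node = await Iroh.memory()
  const one = await node.blobs.addBytes(Array.from(new TextEncoder().encode('one')))
  const two = await node.blobs.addBytes(Array.from(new TextEncoder().encode('two')))
  const collection = new Collection()
  collection.push('one.txt', one.hash)
  collection.push('two.txt', two.hash)
  const { hash, tag } = await node.blobs.createCollection(collection, SetTagOption.auto(), [])
  console.log('collection', hash, 'tag bytes', tag)
}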
/** An in-progress connection attempt. */
export declare class Connecting {
connect(): Promise<Connection>
alpn(): Promise<Buffer>
}
/** An established connection to a remote node. */
export declare class Connection {
alpn(): Buffer | null
remoteNodeId(): PublicKey
openUni(): Promise<SendStream>
acceptUni(): Promise<RecvStream>
openBi(): Promise<BiStream>
acceptBi(): Promise<BiStream>
readDatagram(): Promise<Buffer>
closed(): Promise<string>
closeReason(): string | null
close(errorCode: bigint, reason: Uint8Array): void
sendDatagram(data: Uint8Array): void
maxDatagramSize(): bigint | null
datagramSendBufferSpace(): bigint
rtt(): bigint
stableId(): bigint
setMaxConcurrentUniStream(count: bigint): void
setReceiveWindow(count: bigint): void
setMaxConcurrentBiiStream(count: bigint): void
}
/** A representation of a mutable, synchronizable key-value store. */
export declare class Doc {
/** Get the document id of this doc. */
id(): string
/** Close the document. */
closeMe(): Promise<void>
/** Set the content of a key to a byte array. */
setBytes(authorId: AuthorId, key: Array<number>, value: Array<number>): Promise<Hash>
/** Set an entry on the doc via its key, hash, and size. */
setHash(authorId: AuthorId, key: Array<number>, hash: string, size: bigint): Promise<void>
/** Add an entry from an absolute file path */
importFile(author: AuthorId, key: Array<number>, path: string, inPlace: boolean, cb?: ((err: Error | null, arg: DocImportProgress) => void) | undefined | null): Promise<void>
/** Export an entry as a file to a given absolute path */
exportFile(entry: Entry, path: string, cb?: ((err: Error | null, arg: DocExportProgress) => void) | undefined | null): Promise<void>
/**
* Delete entries that match the given `author` and key `prefix`.
*
* This inserts an empty entry with the key set to `prefix`, effectively clearing all other
* entries whose key starts with or is equal to the given `prefix`.
*
* Returns the number of entries deleted.
*/
delete(authorId: AuthorId, prefix: Array<number>): Promise<bigint>
/** Get an entry for a key and author. */
getExact(author: AuthorId, key: Array<number>, includeEmpty: boolean): Promise<Entry | null>
/**
* Get entries.
*
* Note: this allocates for each `Entry`; if you have many `Entry`s this may be a prohibitively large list.
* Please file an [issue](https://github.com/n0-computer/iroh-ffi/issues/new) if you run into this.
*/
getMany(query: Query): Promise<Array<Entry>>
/** Get the latest entry for a key and author. */
getOne(query: Query): Promise<Entry | null>
/** Share this document with peers over a ticket. */
share(mode: ShareMode, addrOptions: AddrInfoOptions): Promise<DocTicket>
/** Start to sync this document with a list of peers. */
startSync(peers: Array<NodeAddr>): Promise<void>
/** Stop the live sync for this document. */
leave(): Promise<void>
/** Subscribe to events for this document. */
subscribe(cb: ((err: Error | null, arg: LiveEvent) => void)): Promise<void>
/** Get status info for this document */
status(): Promise<OpenState>
/** Set the download policy for this document */
setDownloadPolicy(policy: DownloadPolicy): Promise<void>
/** Get the download policy for this document */
getDownloadPolicy(): Promise<DownloadPolicy>
/** Get sync peers for this document */
getSyncPeers(): Promise<Array<Array<number>> | null>
}
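// --- Example (editor's sketch): writing and reading a key/value entry. Keys and
// values are byte arrays, so strings are encoded first; the entry's content is
// fetched through the blobs client using the entry hash.
import { Iroh, Query } from '@number0/iroh'

async function docExample(): Promise<void> {
  const node = await Iroh.memory({ enableDocs: true })
  const doc = await node.docs.create()
  const author = await node.authors.default()
  const key = Array.from(new TextEncoder().encode('greeting'))
  await doc.setBytes(author, key, Array.from(new TextEncoder().encode('hello doc')))
  const entry = await doc.getOne(Query.authorKeyExact(author, key))
  if (entry != null) {
    const content = await node.blobs.readToBytes(entry.hash)
    console.log(new TextDecoder().decode(Uint8Array.from(content)))
  }
}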
/** Iroh docs client. */
export declare class Docs {
/** Create a new doc. */
create(): Promise<Doc>
/** Join and sync with an already existing document. */
join(ticket: DocTicket): Promise<Doc>
/** Join and sync with an already existing document and subscribe to events on that document. */
joinAndSubscribe(ticket: DocTicket, cb: ((err: Error | null, arg: LiveEvent) => void)): Promise<Doc>
/** List all the docs we have access to on this node. */
list(): Promise<Array<NamespaceAndCapability>>
/**
* Get a [`Doc`].
*
* Returns None if the document cannot be found.
*/
open(id: string): Promise<Doc | null>
/**
* Delete a document from the local node.
*
* This is a destructive operation. Both the document secret key and all entries in the
* document will be permanently deleted from the node's storage. Content blobs will be deleted
* through garbage collection unless they are referenced from another document or tag.
*/
dropDoc(docId: string): Promise<void>
}
/** Contains both a key (either secret or public) to a document, and a list of peers to join. */
export declare class DocTicket {
/** The actual capability. */
readonly capability: string
/** The capability kind */
readonly capabilityKind: CapabilityKind
static fromString(str: string): DocTicket
toString(): string
get nodes(): Array<NodeAddr>
}
/** Download policy to decide which content blobs shall be downloaded. */
export declare class DownloadPolicy {
/** Download everything */
static everything(): DownloadPolicy
/** Download nothing */
static nothing(): DownloadPolicy
/** Download nothing except keys that match the given filters */
static nothingExcept(filters: Array<FilterKind>): DownloadPolicy
/** Download everything except keys that match the given filters */
static everythingExcept(filters: Array<FilterKind>): DownloadPolicy
}
export declare class Endpoint {
/** The string representation of this endpoint's NodeId. */
nodeId(): string
connect(nodeAddr: NodeAddr, alpn: Uint8Array): Promise<Connection>
}
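// --- Example (editor's sketch): dialing a remote node with an application ALPN
// and exchanging data over a bidirectional stream. `remote` would come from the
// other side (e.g. its `net.nodeAddr()`); the ALPN string is illustrative.
import { Iroh } from '@number0/iroh'
import type { NodeAddr } from '@number0/iroh'

async function dialExample(remote: NodeAddr): Promise<void> {
  const node = await Iroh.memory()
  const conn = await node.node.endpoint().connect(remote, new TextEncoder().encode('/my-app/0'))
  const { send, recv } = await conn.openBi()
  await send.writeAll(new TextEncoder().encode('ping'))
  await send.finish()
  const reply = await recv.readToEnd(1024)
  console.log('reply:', reply.toString())
}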
/** Filter strategy used in download policies. */
export declare class FilterKind {
/** Verifies whether this filter matches a given key */
matches(key: Array<number>): boolean
/** Returns a FilterKind that matches if the contained bytes are a prefix of the key. */
static prefix(prefix: Array<number>): FilterKind
/** Returns a FilterKind that matches if the contained bytes and the key are the same. */
static exact(key: Array<number>): FilterKind
}
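// --- Example (editor's sketch): a download policy that only fetches entries
// whose keys start with "images/".
import { DownloadPolicy, FilterKind } from '@number0/iroh'

const imagesOnly = DownloadPolicy.nothingExcept([
  FilterKind.prefix(Array.from(new TextEncoder().encode('images/'))),
])
// Later: await doc.setDownloadPolicy(imagesOnly)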
/** Iroh gossip client. */
export declare class Gossip {
subscribe(topic: Array<number>, bootstrap: Array<string>, cb: ((err: Error | null, arg: Message) => void)): Promise<Sender>
}
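// --- Example (editor's sketch): joining a gossip topic and broadcasting. The
// 32-byte topic id and the bootstrap node ids are placeholders.
import { Iroh } from '@number0/iroh'

async function gossipExample(bootstrap: Array<string>): Promise<void> {
  const node = await Iroh.memory()
  const topic = Array.from(new Uint8Array(32)) // all-zero topic id, for illustration
  const sender = await node.gossip.subscribe(topic, bootstrap, (_err, msg) => {
    if (msg?.received) {
      console.log('got:', new TextDecoder().decode(Uint8Array.from(msg.received.content)))
    }
  })
  await sender.broadcast(Array.from(new TextEncoder().encode('hello swarm')))
}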
/** Hash type used throughout Iroh. A blake3 hash. */
export declare class Hash {
/** The base32 representation of the hash. */
readonly value: string
/** Calculate the hash of the provided bytes. */
constructor(buf: Array<number>)
/** Checks if the other hash is equal to this instance. */
isEqual(other: Hash): boolean
/** Bytes of the hash. */
toBytes(): Array<number>
/** Create a `Hash` from its raw bytes representation. */
static fromBytes(bytes: Array<number>): Hash
/** Make a Hash from base32 or hex string */
static fromString(s: string): Hash
/** Convert the hash to a hex string. */
toString(target?: string | undefined | null): string
}
/** An Iroh node. Allows you to sync, store, and transfer data. */
export declare class Iroh {
/** Access to author-specific functionality. */
get authors(): Authors
/** Access to blob-specific functionality. */
get blobs(): Blobs
/** Access to doc-specific functionality. */
get docs(): Docs
/** Access to gossip-specific functionality. */
get gossip(): Gossip
/** Access to net-specific functionality. */
get net(): Net
/**
* Create a new iroh node.
*
* The `path` param should be a directory where we can store or load
* iroh data from a previous session.
*/
static persistent(path: string, opts?: NodeOptions | undefined | null): Promise<Iroh>
/**
* Create a new iroh node.
*
* All data is only persisted in memory.
*/
static memory(opts?: NodeOptions | undefined | null): Promise<Iroh>
/** Access to node-specific functionality. */
get node(): Node
}
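// --- Example (editor's sketch): the two ways to construct a node. A persistent
// node keeps its data (and default author) across restarts; a memory node loses
// everything on shutdown. The path and option values are illustrative.
import { Iroh } from '@number0/iroh'

async function nodesExample(): Promise<void> {
  const ephemeral = await Iroh.memory()
  console.log('memory node id:', await ephemeral.net.nodeId())
  const durable = await Iroh.persistent('/var/lib/my-app/iroh', {
    enableDocs: true,
    gcIntervalMillis: 60000, // clean up unreferenced blobs every minute
  })
  await durable.node.shutdown()
  await ephemeral.node.shutdown()
}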
/** Iroh net client. */
export declare class Net {
/** Return `RemoteInfo`s for nodes we know about. */
remoteInfoList(): Promise<Array<RemoteInfo>>
/** Return information on the given remote node. */
remoteInfo(nodeId: PublicKey): Promise<RemoteInfo | null>
/** The string representation of the PublicKey of this node. */
nodeId(): Promise<string>
/** Return the [`NodeAddr`] for this node. */
nodeAddr(): Promise<NodeAddr>
/** Add a known node address to the node. */
addNodeAddr(addr: NodeAddr): Promise<void>
/** Get the relay server we are connected to. */
homeRelay(): Promise<string | null>
}
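// --- Example (editor's sketch): inspecting this node's addressing information
// and the remotes it knows about.
import { Iroh } from '@number0/iroh'

async function netExample(): Promise<void> {
  const node = await Iroh.memory()
  const addr = await node.net.nodeAddr()
  console.log('id:', addr.nodeId, 'relay:', addr.relayUrl, 'direct:', addr.addresses)
  for (const info of await node.net.remoteInfoList()) {
    console.log('remote:', info.connType.type, 'latency(ms):', info.latency)
  }
}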
/** Iroh node client. */
export declare class Node {
/** Get statistics of the running node. */
stats(): Promise<Record<string, CounterStats>>
/** Get status information about a node */
status(): Promise<NodeStatus>
/** Shutdown this iroh node. */
shutdown(): Promise<void>
endpoint(): Endpoint
}
/**
* A public key.
*
* The key itself is just a 32 byte array, but a key has associated crypto
* information that is cached for performance reasons.
*/
export declare class PublicKey {
/** Returns true if the PublicKeys are equal */
isEqual(other: PublicKey): boolean
/** Express the PublicKey as a byte array */
toBytes(): Array<number>
/** Make a PublicKey from base32 string */
static fromString(s: string): PublicKey
/** Make a PublicKey from byte array */
static fromBytes(bytes: Array<number>): PublicKey
/**
* Convert to a base32 string limited to the first 10 bytes for a friendly string
* representation of the key.
*/
fmtShort(): string
/** Converts the public key into base32 string. */
toString(): string
}
/**
* Build a Query to search for an entry or entries in a doc.
*
* Use this with `QueryOptions` to determine sorting, grouping, and pagination.
*/
export declare class Query {
/**
* Query all records.
*
* If `opts` is `None`, the default values will be used:
* sort_by: SortBy::AuthorKey
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static all(opts?: QueryOptions | undefined | null): Query
/**
* Query only the latest entry for each key, omitting older entries if the entry was written
* to by multiple authors.
*
* If `opts` is `None`, the default values will be used:
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static singleLatestPerKey(opts?: QueryOptions | undefined | null): Query
/**
* Query exactly the key, but only the latest entry for it, omitting older entries if the entry was written
* to by multiple authors.
*/
static singleLatestPerKeyExact(key: Array<number>): Query
/**
* Query only the latest entry for each key, with this prefix, omitting older entries if the entry was written
* to by multiple authors.
*
* If `opts` is `None`, the default values will be used:
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static singleLatestPerKeyPrefix(prefix: Array<number>, opts?: QueryOptions | undefined | null): Query
/**
* Query all entries by a single author.
*
* If `opts` is `None`, the default values will be used:
* sort_by: SortBy::AuthorKey
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static author(author: AuthorId, opts?: QueryOptions | undefined | null): Query
/**
* Query all entries that have an exact key.
*
* If `opts` is `None`, the default values will be used:
* sort_by: SortBy::AuthorKey
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static keyExact(key: Array<number>, opts?: QueryOptions | undefined | null): Query
/** Create a Query for a single key and author. */
static authorKeyExact(author: AuthorId, key: Array<number>): Query
/**
* Create a query for all entries with a given key prefix.
*
* If `opts` is `None`, the default values will be used:
* sort_by: SortBy::AuthorKey
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static keyPrefix(prefix: Array<number>, opts?: QueryOptions | undefined | null): Query
/**
* Create a query for all entries of a single author with a given key prefix.
*
* If `opts` is `None`, the default values will be used:
* direction: SortDirection::Asc
* offset: None
* limit: None
*/
static authorKeyPrefix(author: AuthorId, prefix: Array<number>, opts?: QueryOptions | undefined | null): Query
/** Get the limit for this query (max. number of entries to emit). */
limit(): bigint | null
/** Get the offset for this query (number of entries to skip at the beginning). */
offset(): bigint
}
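// --- Example (editor's sketch): paging through the latest entries under a key
// prefix, sorted descending. All `QueryOptions` fields are optional.
import { Query, SortDirection } from '@number0/iroh'

const postsQuery = Query.singleLatestPerKeyPrefix(
  Array.from(new TextEncoder().encode('posts/')),
  { direction: SortDirection.Desc, offset: 0n, limit: 20n },
)
// Later: const entries = await doc.getMany(postsQuery)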
/** A chunk range specification as a sequence of chunk offsets */
export declare class RangeSpec {
/** Checks if this [`RangeSpec`] does not select any chunks in the blob */
isEmpty(): boolean
/** Check if this [`RangeSpec`] selects all chunks in the blob */
isAll(): boolean
}
/** The receive half of a stream. */
export declare class RecvStream {
read(buf: Uint8Array): Promise<bigint | null>
readExact(buf: Uint8Array): Promise<void>
readToEnd(sizeLimit: number): Promise<Buffer>
id(): Promise<string>
stop(errorCode: bigint): Promise<void>
receivedReset(): Promise<bigint | null>
}
/** Gossip sender */
export declare class Sender {
/** Broadcast a message to all nodes in the swarm */
broadcast(msg: Array<number>): Promise<void>
/** Broadcast a message to all direct neighbors. */
broadcastNeighbors(msg: Array<number>): Promise<void>
/** Closes the subscription; it is an error to use it afterwards */
close(): Promise<void>
}
/** The send half of a stream. */
export declare class SendStream {
write(buf: Uint8Array): Promise<bigint>
writeAll(buf: Uint8Array): Promise<void>
finish(): Promise<void>
reset(errorCode: bigint): Promise<void>
setPriority(p: number): Promise<void>
priority(): Promise<number>
stopped(): Promise<bigint | null>
id(): Promise<string>
}
/** An option for commands that allow setting a Tag */
export declare class SetTagOption {
/** A tag will be automatically generated */
readonly auto: boolean
/** The tag is explicitly named */
readonly name?: Array<number>
/** Indicate you want an automatically generated tag */
static auto(): SetTagOption
/** Indicate you want a named tag */
static named(tag: Array<number>): SetTagOption
}
/** A response to a list tags request */
export declare class TagInfo {
/** The tag */
name: Array<number>
/** The format of the associated blob */
format: BlobFormat
/** The hash of the associated blob */
hash: string
}
/** Iroh tags client. */
export declare class Tags {
/**
* List all tags
*
* Note: this allocates for each `ListTagsResponse`; if you have many tags this may be a prohibitively large list.
* Please file an [issue](https://github.com/n0-computer/iroh-ffi/issues/new) if you run into this.
*/
list(): Promise<Array<TagInfo>>
/** Delete a tag */
delete(name: Array<number>): Promise<void>
}
/** Progress updates for the add operation. */
export interface AddProgress {
/** An item was found with name `name`, from now on referred to via `id` */
found?: AddProgressFound
/** We got progress ingesting item `id`. */
progress?: AddProgressProgress
/** We are done with `id`, and the hash is `hash`. */
done?: AddProgressDone
/** We are done with the whole operation. */
allDone?: AddProgressAllDone
}
/** An AddProgress event indicating we are done with the whole operation */
export interface AddProgressAllDone {
/** The hash of the created data. */
hash: string
/** The format of the added data. */
format: BlobFormat
/** The tag of the added data. */
tag: Array<number>
}
/** An AddProgress event indicating we are done with `id` and now have a hash `hash` */
export interface AddProgressDone {
/** The unique id of the entry. */
id: bigint
/** The hash of the entry. */
hash: string
}
/** An AddProgress event indicating an item was found with name `name`, that can be referred to by `id` */
export interface AddProgressFound {
/** A new unique id for this entry. */
id: bigint
/** The name of the entry. */
name: string
/** The size of the entry in bytes. */
size: bigint
}
/** An AddProgress event indicating we got progress ingesting item `id`. */
export interface AddProgressProgress {
/** The unique id of the entry. */
id: bigint
/** The offset of the progress, in bytes. */
offset: bigint
}
/** Options when creating a ticket */
export declare const enum AddrInfoOptions {
/**
* Only the Node ID is added.
*
* This usually means that iroh-dns discovery is used to find address information.
*/
Id = 'Id',
/** Include both the relay URL and the direct addresses. */
RelayAndAddresses = 'RelayAndAddresses',
/** Only include the relay URL. */
Relay = 'Relay',
/** Only include the direct addresses. */
Addresses = 'Addresses'
}
/** Outcome of a blob add operation. */
export interface BlobAddOutcome {
/** The hash of the blob */
hash: string
/** The format of the blob */
format: BlobFormat
/** The size of the blob */
size: bigint
/** The tag of the blob */
tag: Array<number>
}
/** The expected format of a hash being exported. */
export declare const enum BlobExportFormat {
/** The hash refers to any blob and will be exported to a single file. */
Blob = 'Blob',
/**
* The hash refers to a [`crate::format::collection::Collection`] blob
* and all children of the collection shall be exported to one file per child.
*
* If the blob can be parsed as a [`BlobFormat::HashSeq`], and the first child contains
* collection metadata, all other children of the collection will be exported to
* a file each, with their collection name treated as a relative path to the export
* destination path.
*
* If the blob cannot be parsed as a collection, the operation will fail.
*/
Collection = 'Collection'
}
/**
* The export mode describes how files will be exported.
*
* This is a hint to the export trait method. For some implementations, this
* does not make any sense. E.g. an in memory implementation will always have
* to copy the file into memory. Also, a disk based implementation might choose
* to copy small files even if the mode is `Reference`.
*/
export declare const enum BlobExportMode {
/**
* This mode will copy the file to the target directory.
*
* This is the safe default because the file cannot be accidentally modified
* after it has been exported.
*/
Copy = 'Copy',
/**
* This mode will try to move the file to the target directory and then reference it from
* the database.
*
* This has a large performance and storage benefit, but it is less safe since
* the file might be modified in the target directory after it has been exported.
*
* Stores are allowed to ignore this mode and always copy the file, e.g.
* if the file is very small or if the store does not support referencing files.
*/
TryReference = 'TryReference'
}
/** A format identifier */
export declare const enum BlobFormat {
/** Raw blob */
Raw = 'Raw',
/** A sequence of BLAKE3 hashes */
HashSeq = 'HashSeq'
}
/** A response to a list blobs request */
export interface BlobInfo {
/** Location of the blob */
path: string
/** The hash of the blob */
hash: string
/** The size of the blob */
size: bigint
}
/** Events emitted by the provider informing about the current status. */
export interface BlobProvideEvent {
/** A new collection or tagged blob has been added */
taggedBlobAdded?: TaggedBlobAdded
/** A new client connected to the node. */
clientConnected?: ClientConnected
/** A request was received from a client. */
getRequestReceived?: GetRequestReceived
/** A sequence of hashes has been found and is being transferred. */
transferHashSeqStarted?: TransferHashSeqStarted
/**
* A chunk of a blob was transferred.
*
* These events will be sent with try_send, so you cannot assume that you
* will receive all of them.
*/
transferProgress?: TransferProgress
/** A blob in a sequence was transferred. */
transferBlobCompleted?: TransferBlobCompleted
/** A request was completed and the data was sent to the client. */
transferCompleted?: TransferCompleted
/** A request was aborted because the client disconnected. */
transferAborted?: TransferAborted
}
/** Status information about a blob. */
export type BlobStatus =
  | { type: 'NotFound' }
  | {
      type: 'Partial'
      /** The size of the currently stored partial blob. */
      size: bigint
      /** If the size is verified. */
      sizeIsVerified: boolean
    }
  | {
      type: 'Complete'
      /** The size of the blob. */
      size: bigint
    }
export declare const enum CapabilityKind {
/** A writable replica. */
Write = 'Write',
/** A readable replica. */
Read = 'Read'
}
/** A new client connected to the node. */
export interface ClientConnected {
/** A unique connection id. */
connectionId: bigint
}
/** A response to a list collections request */
export interface CollectionInfo {
/** Tag of the collection */
tag: Array<number>
/** Hash of the collection */
hash: string
/**
* Number of children in the collection
*
* This is an optional field, because the data is not always available.
*/
totalBlobsCount?: bigint
/**
* Total size of the raw data referred to by all links
*
* This is an optional field, because the data is not always available.
*/
totalBlobsSize?: bigint
}
/** The type of connection we have to the node */
export interface ConnectionType {
/** The type of connection. */
type: ConnType
/** Details of the actual connection, dependent on the type. */
details?: string
}
/** The type of the connection */
export declare const enum ConnType {
/** Indicates you have a UDP connection. */
Direct = 'Direct',
/** Indicates you have a relayed connection. */
Relay = 'Relay',
/** Indicates you have an unverified UDP connection, and a relay connection for backup. */
Mixed = 'Mixed',
/** Indicates you have no proof of connection. */
None = 'None'
}
/** Whether the content status is available on a node. */
export declare const enum ContentStatus {
/** The content is completely available. */
Complete = 'Complete',
/** The content is partially available. */
Incomplete = 'Incomplete',
/** The content is missing. */
Missing = 'Missing'
}
/** Stats counter */
export interface CounterStats {
/** The counter value */
value: number
/** The counter description */
description: string
}
/** Information about a direct address. */
export interface DirectAddrInfo {
/** The address reported. */
addr: string
/** The latency to the address, if any. */
latency?: number
/** How long ago the last control message was received by this node. */
lastControlTime?: number
/** The last control message received. */
lastControlMsg?: string
/** How long ago the last payload message was received for this node. */
lastPayload?: number
/** When this connection was last alive, if ever. */
lastAlive?: number
}
/** Progress updates for the doc export file operation. */
export interface DocExportProgress {
/** An item was found with name `name`, from now on referred to via `id` */
found?: DocExportProgressFound
/** We got progress exporting item `id`. */
progress?: DocExportProgressProgress
/** We finished exporting a blob */
done?: DocExportProgressDone
/** We are done with the whole operation. */
allDone: boolean
}
/** A DocExportProgress event indicating we got an error and need to abort */
export interface DocExportProgressAbort {
/** The error message */
error: string
}
/** A DocExportProgress event indicating a single blob with `id` is done */
export interface DocExportProgressDone {
/** The unique id of the entry. */
id: bigint
}
/** A DocExportProgress event indicating a file was found with name `name`, from now on referred to via `id` */
export interface DocExportProgressFound {
/** A new unique id for this entry. */
id: bigint
/** The hash of the entry. */
hash: string
/** The size of the entry in bytes. */
size: bigint
/** The path where we are writing the entry */
outpath: string
}
/** A DocExportProgress event indicating we've made progress exporting item `id`. */
export interface DocExportProgressProgress {
/** The unique id of the entry. */
id: bigint
/** The offset of the progress, in bytes. */
offset: bigint
}
/** Progress updates for the doc import file operation. */
export interface DocImportProgress {
/** An item was found with name `name`, from now on referred to via `id` */
found?: DocImportProgressFound
/** We got progress ingesting item `id`. */
progress?: DocImportProgressProgress
/** We are done ingesting `id`, and the hash is `hash`. */
ingestDone?: DocImportProgressIngestDone
/** We are done with the whole operation. */
allDone?: DocImportProgressAllDone
}
/** A DocImportProgress event indicating we are done setting the entry to the doc */
export interface DocImportProgressAllDone {
/** The key of the entry */
key: Array<number>
}
/** A DocImportProgress event indicating a file was found with name `name`, from now on referred to via `id` */
export interface DocImportProgressFound {
/** A new unique id for this entry. */
id: bigint
/** The name of the entry. */
name: string
/** The size of the entry in bytes. */
size: bigint
}
/** A DocImportProgress event indicating we are finished adding `id` to the data store and the hash is `hash`. */
export interface DocImportProgressIngestDone {
/** The unique id of the entry. */
id: bigint
/** The hash of the entry. */
hash: string
}
/** A DocImportProgress event indicating we've made progress ingesting item `id`. */
export interface DocImportProgressProgress {
/** The unique id of the entry. */
id: bigint
/** The offset of the progress, in bytes. */
offset: bigint
}
/** The type of `DocImportProgress` event */
export declare const enum DocImportProgressType {
/** An item was found with name `name`, from now on referred to via `id` */
Found = 'Found',
/** We got progress ingesting item `id`. */
Progress = 'Progress',
/** We are done ingesting `id`, and the hash is `hash`. */
IngestDone = 'IngestDone',
/** We are done with the whole operation. */
AllDone = 'AllDone',
/**
* We got an error and need to abort.
*
* This will be the last message in the stream.
*/
Abort = 'Abort'
}
/** Progress updates for the get operation. */
export interface DownloadProgress {
/** Initial state if subscribing to a running or queued transfer. */
initialState?: DownloadProgressInitialState
/** A new connection was established. */
connected?: undefined
/** An item was found with hash `hash`, from now on referred to via `id` */
found?: DownloadProgressFound
/** Data was found locally */
foundLocal?: DownloadProgressFoundLocal
/** A sequence of hashes was found with hash `hash`. */
foundHashSeq?: DownloadProgressFoundHashSeq
/** We got progress ingesting item `id`. */
progress?: DownloadProgressProgress
/** We are done with `id`, and the hash is `hash`. */
done?: DownloadProgressDone
/** We are done with the whole operation. */
allDone?: DownloadProgressAllDone
}
/** A DownloadProgress event indicating we got an error and need to abort */
export interface DownloadProgressAbort {
error: string
}
/** A DownloadProgress event indicating we are done with the whole operation */
export interface DownloadProgressAllDone {
/** The number of bytes written */
bytesWritten: bigint
/** The number of bytes read */
bytesRead: bigint
/** The time it took to transfer the data, in milliseconds. */
elapsed: bigint
}
/** A DownloadProgress event indicating we are done with `id` */
export interface DownloadProgressDone {
/** The unique id of the entry. */
id: bigint
}
/** A DownloadProgress event indicating an item was found with hash `hash`, that can be referred to by `id` */
export interface DownloadProgressFound {
/** A new unique id for this entry. */
id: bigint
/** child offset */
child: bigint
/** The hash of the entry. */
hash: string
/** The size of the entry in bytes. */
size: bigint
}
/** A DownloadProgress event indicating a hash sequence was found with hash `hash` */
export interface DownloadProgressFoundHashSeq {
/** Number of children in the collection, if known. */
children: bigint
/** The hash of the entry. */
hash: string
}
/** A DownloadProgress event indicating an entry was found locally */
export interface DownloadProgressFoundLocal {
/** child offset */
child: bigint
/** The hash of the entry. */
hash: string
/** The size of the entry in bytes. */
size: bigint
}
/** The initial state when subscribing to a running or queued transfer. */
export interface DownloadProgressInitialState {
/** Whether we are connected to a node */
connected: boolean
}
/** A DownloadProgress event indicating we got progress ingesting item `id`. */
export interface DownloadProgressProgress {
/** The unique id of the entry. */
id: bigint
/** The offset of the progress, in bytes. */
offset: bigint
}
/**
* A single entry in a [`Doc`]
*
* An entry is identified by a key, its [`AuthorId`], and the [`Doc`]'s
* namespace id. Its value is the 32-byte BLAKE3 [`hash`]
* of the entry's content data, the size of this content data, and a timestamp.
*/
export interface Entry {
/** The namespace this entry belongs to */
namespace: string
/** The author of the entry */
author: string
/** The key of the entry. */
key: Array<number>
/** Length of the data referenced by `hash`. */
len: bigint
/** Hash of the content data. */
hash: string
/** Record creation timestamp. Counted as micros since the Unix epoch. */
timestamp: bigint
}
/** A request was received from a client. */
export interface GetRequestReceived {
/** A unique connection id. */
connectionId: bigint
/** An identifier uniquely identifying this transfer request. */
requestId: bigint
/** The hash for which the client wants to receive data. */
hash: string
}
/** The Hash and associated tag of a newly created collection */
export interface HashAndTag {
/** The hash of the collection */
hash: string
/** The tag of the collection */
tag: Array<number>
}
/** A response to a list blobs request */
export interface IncompleteBlobInfo {
/** The size we got */
size: bigint
/** The size we expect */
expectedSize: bigint
/** The hash of the blob */
hash: string
}
/** Outcome of an InsertRemote event. */
export interface InsertRemoteEvent {
/** The peer that sent us the entry. */
from: string
/** The inserted entry. */
entry: Entry
/** If the content is available at the local node */
contentStatus: ContentStatus
}
/**
* Helper function that translates a key that was derived from the [`path_to_key`] function back
* into a path.
*
* If `prefix` exists, it will be stripped before converting back to a path.
* If `root` exists, it will be added as a parent of the created path.
* Removes any null byte that has been appended to the key.
*/
export declare function keyToPath(key: Array<number>, prefix?: string | undefined | null, root?: string | undefined | null): string
/** The latency and type of the control message */
export interface LatencyAndControlMsg {
/** The latency of the control message. In milliseconds */
latency: number
/** The type of control message, represented as a string */
controlMsg: string
}
/** `LinkAndName` includes a name and a hash for a blob in a collection */
export interface LinkAndName {
/** The name associated with this [`Hash`] */
name: string
/** The [`Hash`] of the blob */
link: string
}
/** Events informing about actions of the live sync progress */
export interface LiveEvent {
/** A local insertion. */
insertLocal?: LiveEventInsertLocal
/** Received a remote insert. */
insertRemote?: LiveEventInsertRemote
/** The content of an entry was downloaded and is now available at the local node */
contentReady?: LiveEventContentReady
/** We have a new neighbor in the swarm. */
neighborUp?: LiveEventNeighborUp
/** We lost a neighbor in the swarm. */
neighborDown?: LiveEventNeighborDown
/** A set-reconciliation sync finished. */
syncFinished?: SyncEvent
/**
* All pending content is now ready.
*
* This event signals that all queued content downloads from the last sync run have either
* completed or failed.
*
* It will only be emitted after a [`Self::SyncFinished`] event, never before.
*
* Receiving this event does not guarantee that all content in the document is available. If
* blobs failed to download, this event will still be emitted after all operations completed.
*/
pendingContentReady: boolean
}
export interface LiveEventContentReady {
/** The content hash of the newly available entry content */
hash: string
}
export interface LiveEventInsertLocal {
/** The inserted entry. */
entry: Entry
}
export interface LiveEventInsertRemote {
/** The peer that sent us the entry. */
from: string
/** The inserted entry. */
entry: Entry
/** If the content is available at the local node */
contentStatus: ContentStatus
}
export interface LiveEventNeighborDown {
/** Public key of the neighbor */
neighbor: string
}
export interface LiveEventNeighborUp {
/** Public key of the neighbor */
neighbor: string
}
/** The logging level. See the Rust [log crate](https://docs.rs/log) for more information. */
export declare const enum LogLevel {
Trace = 'Trace',
Debug = 'Debug',
Info = 'Info',
Warn = 'Warn',
Error = 'Error',
Off = 'Off'
}
/** Gossip message */
export interface Message {
/** We have a new, direct neighbor in the swarm membership layer for this topic */
neighborUp?: string
/** We dropped a direct neighbor in the swarm membership layer for this topic */
neighborDown?: string
/** A gossip message was received for this topic */
received?: MessageContent
/** We joined the topic; contains the set of initial neighbors */
joined?: Array<string>
/** We missed some messages */
lagged: boolean
}
/** The actual content of a gossip message. */
export interface MessageContent {
/** The content of the message */
content: Array<number>
/** The node that delivered the message. This is not the same as the original author. */
deliveredFrom: string
}
/** The namespace id and CapabilityKind (read/write) of the doc */
export interface NamespaceAndCapability {
/** The namespace id of the doc */
namespace: string
/** The capability you have for the doc (read/write) */
capability: CapabilityKind
}
/** A peer and its addressing information. */
export interface NodeAddr {
nodeId: string
/** The home relay URL for this peer */
relayUrl?: string
/** Direct addresses of this peer. */
addresses?: Array<string>
}
export declare const enum NodeDiscoveryConfig {
/** Use no node discovery mechanism. */
None = 'None',
/**
* Use the default discovery mechanism.
*
* This uses two discovery services concurrently:
*
* - It publishes to a pkarr service operated by [number 0] which makes the information
* available via DNS in the `iroh.link` domain.
*
* - It uses an mDNS-like system to announce itself on the local network.
*
* # Usage during tests
*
* Note that the default changes when compiling with `cfg(test)` or the `test-utils`
* cargo feature from [iroh-net] is enabled. In this case only the Pkarr/DNS service
* is used, but on the `iroh.test` domain. This domain is not integrated with the
* global DNS network and thus node discovery is effectively disabled. To use node
* discovery in a test use the [`iroh_net::test_utils::DnsPkarrServer`] in the test and
* configure it here as a custom discovery mechanism ([`DiscoveryConfig::Custom`]).
*
* [number 0]: https://n0.computer
* [iroh-net]: crate::net
*/
Default = 'Default'
}
/** Options passed to [`IrohNode.new`]. Controls the behaviour of an iroh node. */
export interface NodeOptions {
/**
* How frequently the blob store should clean up unreferenced blobs, in milliseconds.
* Set to null to disable GC.
*/
gcIntervalMillis?: number
/** Provide a callback to hook into events when the blobs component adds and provides blobs. */
blobEvents?: ((err: Error | null, arg: BlobProvideEvent) => void)
/** Should docs be enabled? Defaults to `false`. */
enableDocs?: boolean
/** Overwrites the default IPv4 address to bind to */
ipv4Addr?: string
/** Overwrites the default IPv6 address to bind to */
ipv6Addr?: string
/** Configure the node discovery. */
nodeDiscovery?: NodeDiscoveryConfig
/** Provide a specific secret key, identifying this node. Must be 32 bytes long. */
secretKey?: Array<number>
protocols?: Record<Array<number>, ((err: Error | null, arg: Endpoint) => ProtocolHandler)>
}
/** The response to a status request */
export interface NodeStatus {
/** The node id and socket addresses of this node. */
addr: NodeAddr
/** The bound listening addresses of the node */
listenAddrs: Array<string>
/** The version of the node */
version: string
/** RPC address, if currently listening. */
rpcAddr?: string
}
/** The state for an open replica. */
export interface OpenState {
/** Whether to accept sync requests for this replica. */
sync: boolean
/** How many event subscriptions are open */
subscribers: bigint
/** By how many handles the replica is currently held open */
handles: bigint
}
/** Why we performed a sync exchange */
export declare const enum Origin {
/** Direct join request via API */
ConnectDirectJoin = 'ConnectDirectJoin',
/** Peer showed up as new neighbor in the gossip swarm */
ConnectNewNeighbor = 'ConnectNewNeighbor',
/** We synced after receiving a sync report that indicated news for us */
ConnectSyncReport = 'ConnectSyncReport',
/** We received a sync report while a sync was running, so run again afterwards */
ConnectResync = 'ConnectResync',
/** A peer connected to us and we accepted the exchange */
Accept = 'Accept'
}
/**
* Helper function that creates a document key from a canonicalized path, removing the `root` and adding the `prefix`, if they exist
*
* Appends the null byte to the end of the key.
*/
export declare function pathToKey(path: string, prefix?: string | undefined | null, root?: string | undefined | null): Array<number>
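// --- Example (editor's sketch): round-tripping between a filesystem path and a
// doc key with the two helpers above. The paths and prefix are illustrative.
import { pathToKey, keyToPath } from '@number0/iroh'

// Strips the root `/data`, prepends the prefix, and appends a null byte.
const key = pathToKey('/data/photos/cat.png', 'img/', '/data')
// Reverses the transformation, yielding '/data/photos/cat.png' again.
const path = keyToPath(key, 'img/', '/data')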
export interface ProtocolHandler {
accept: ((err: Error | null, arg: Connection) => void)
shutdown?: ((err: Error | null, ) => void)
}
/** Options for sorting and pagination for using [`Query`]s. */
export interface QueryOptions {
/**
* Sort by author or key first.
*
* Default is [`SortBy::AuthorKey`], so sorting first by author and then by key.
*/
sortBy?: SortBy
/**
* Direction by which to sort the entries
*
* Default is [`SortDirection::Asc`]
*/
direction?: SortDirection
/** Offset */
offset?: bigint
/**
* Limit for pagination.
*
* When the limit is 0, no limit is applied.
*/
limit?: bigint
}
/** Defines the way to read bytes. */
export interface ReadAtLen {
type: ReadAtLenType
/** The size to read; must be set for `Exact` and `AtMost`. */
size?: bigint
}
/** Defines the way to read bytes. */
export declare const enum ReadAtLenType {
/** Reads all available bytes. */
All = 'All',
/** Reads exactly this many bytes, erroring out on larger or smaller. */
Exact = 'Exact',
/** Reads at most this many bytes. */
AtMost = 'AtMost'
}
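// --- Example (editor's sketch): reading a 16-byte slice from offset 32 of a
// blob with `readAtToBytes` and an `Exact` length.
import { Iroh, ReadAtLenType } from '@number0/iroh'

async function readSlice(hash: string): Promise<Uint8Array> {
  const node = await Iroh.memory()
  const bytes = await node.blobs.readAtToBytes(hash, 32n, { type: ReadAtLenType.Exact, size: 16n })
  return Uint8Array.from(bytes)
}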
/** Information about a connection */
export interface RemoteInfo {
/** The node identifier of the endpoint. Also a public key. */
nodeId: Array<number>
/** Relay url, if available. */
relayUrl?: string
/**
* List of addresses at which this node might be reachable, plus any latency information we
* have about that address and the last time the address was used.
*/
addrs: Array<DirectAddrInfo>
/** The type of connection we have to the peer, either direct or over relay. */
connType: ConnectionType
/** The latency of the `conn_type`. In milliseconds. */
latency?: number
/** Duration since the last time this peer was used. In milliseconds. */
lastUsed?: number
}
/** Set the logging level. */
export declare function setLogLevel(level: LogLevel): void
/** Intended capability for document share tickets */
export declare const enum ShareMode {
/** Read-only access */
Read = 'Read',
/** Write access */
Write = 'Write'
}
/** Fields by which the query can be sorted */
export declare const enum SortBy {
/** Sort by key, then author. */
KeyAuthor = 'KeyAuthor',
/** Sort by author, then key. */
AuthorKey = 'AuthorKey'
}
/** Sort direction */
export declare const enum SortDirection {
/** Sort ascending */
Asc = 'Asc',
/** Sort descending */
Desc = 'Desc'
}
/** Outcome of a sync operation */
export interface SyncEvent {
/** Peer we synced with */
peer: string
/** Origin of the sync exchange */
origin: Origin
/** Timestamp when the sync finished */
finished: Date
/** Timestamp when the sync started */
started: Date
/** Result of the sync operation. `None` if successful. */
result?: string
}
/** Why we started a sync request */
export declare const enum SyncReason {
/** Direct join request via API */
DirectJoin = 'DirectJoin',
/** Peer showed up as new neighbor in the gossip swarm */
NewNeighbor = 'NewNeighbor',
/** We synced after receiving a sync report that indicated news for us */
SyncReport = 'SyncReport',
/** We received a sync report while a sync was running, so run again afterwards */
Resync = 'Resync'
}
/** A BlobProvide event indicating a new tagged blob or collection was added */
export interface TaggedBlobAdded {
/** The hash of the added data */
hash: string
/** The format of the added data */
format: BlobFormat
/** The