UNPKG

@qdrant/js-client-rest

Version:

This repository contains the REST client for the [Qdrant](https://github.com/qdrant/qdrant) vector search engine.

1,058 lines · 252 kB
/** * This file was auto-generated by openapi-typescript. * Do not make direct changes to the file. */ /** OneOf type helpers */ type Without<T, U> = { [P in Exclude<keyof T, keyof U>]?: never; }; type XOR<T, U> = (T | U) extends object ? (Without<T, U> & U) | (Without<U, T> & T) : T | U; type OneOf<T extends any[]> = T extends [infer Only] ? Only : T extends [infer A, infer B, ...infer Rest] ? OneOf<[XOR<A, B>, ...Rest]> : never; export interface paths { "/collections/{collection_name}/shards": { /** Create shard key */ put: operations["create_shard_key"]; }; "/collections/{collection_name}/shards/delete": { /** Delete shard key */ post: operations["delete_shard_key"]; }; "/": { /** * Returns information about the running Qdrant instance * @description Returns information about the running Qdrant instance like version and commit id */ get: operations["root"]; }; "/telemetry": { /** * Collect telemetry data * @description Collect telemetry data including app info, system info, collections info, cluster info, configs and statistics */ get: operations["telemetry"]; }; "/metrics": { /** * Collect Prometheus metrics data * @description Collect metrics data including app info, collections info, cluster info and statistics */ get: operations["metrics"]; }; "/locks": { /** * Get lock options * @description Get lock options. If write is locked, all write operations and collection creation are forbidden */ get: operations["get_locks"]; /** * Set lock options * @description Set lock options. If write is locked, all write operations and collection creation are forbidden. Returns previous lock options */ post: operations["post_locks"]; }; "/healthz": { /** * Kubernetes healthz endpoint * @description An endpoint for health checking used in Kubernetes. */ get: operations["healthz"]; }; "/livez": { /** * Kubernetes livez endpoint * @description An endpoint for health checking used in Kubernetes. 
*/ get: operations["livez"]; }; "/readyz": { /** * Kubernetes readyz endpoint * @description An endpoint for health checking used in Kubernetes. */ get: operations["readyz"]; }; "/issues": { /** * Get issues * @description Get a report of performance issues and configuration suggestions */ get: operations["get_issues"]; /** * Clear issues * @description Removes all issues reported so far */ delete: operations["clear_issues"]; }; "/cluster": { /** * Get cluster status info * @description Get information about the current state and composition of the cluster */ get: operations["cluster_status"]; }; "/cluster/recover": { /** Tries to recover current peer Raft state. */ post: operations["recover_current_peer"]; }; "/cluster/peer/{peer_id}": { /** * Remove peer from the cluster * @description Tries to remove peer from the cluster. Will return an error if peer has shards on it. */ delete: operations["remove_peer"]; }; "/collections": { /** * List collections * @description Get list name of all existing collections */ get: operations["get_collections"]; }; "/collections/{collection_name}": { /** * Collection info * @description Get detailed information about specified existing collection */ get: operations["get_collection"]; /** * Create collection * @description Create new collection with given parameters */ put: operations["create_collection"]; /** * Delete collection * @description Drop collection and all associated data */ delete: operations["delete_collection"]; /** * Update collection parameters * @description Update parameters of the existing collection */ patch: operations["update_collection"]; }; "/collections/aliases": { /** Update aliases of the collections */ post: operations["update_aliases"]; }; "/collections/{collection_name}/index": { /** * Create index for field in collection * @description Create index for field in collection */ put: operations["create_field_index"]; }; "/collections/{collection_name}/exists": { /** * Check the existence of a collection 
* @description Returns "true" if the given collection name exists, and "false" otherwise */ get: operations["collection_exists"]; }; "/collections/{collection_name}/index/{field_name}": { /** * Delete index for field in collection * @description Delete field index for collection */ delete: operations["delete_field_index"]; }; "/collections/{collection_name}/cluster": { /** * Collection cluster info * @description Get cluster information for a collection */ get: operations["collection_cluster_info"]; /** Update collection cluster setup */ post: operations["update_collection_cluster"]; }; "/collections/{collection_name}/aliases": { /** * List aliases for collection * @description Get list of all aliases for a collection */ get: operations["get_collection_aliases"]; }; "/aliases": { /** * List collections aliases * @description Get list of all existing collections aliases */ get: operations["get_collections_aliases"]; }; "/collections/{collection_name}/snapshots/upload": { /** * Recover from an uploaded snapshot * @description Recover local collection data from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. */ post: operations["recover_from_uploaded_snapshot"]; }; "/collections/{collection_name}/snapshots/recover": { /** * Recover from a snapshot * @description Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. 
*/ put: operations["recover_from_snapshot"]; }; "/collections/{collection_name}/snapshots": { /** * List collection snapshots * @description Get list of snapshots for a collection */ get: operations["list_snapshots"]; /** * Create collection snapshot * @description Create new snapshot for a collection */ post: operations["create_snapshot"]; }; "/collections/{collection_name}/snapshots/{snapshot_name}": { /** * Download collection snapshot * @description Download specified snapshot from a collection as a file */ get: operations["get_snapshot"]; /** * Delete collection snapshot * @description Delete snapshot for a collection */ delete: operations["delete_snapshot"]; }; "/snapshots": { /** * List of storage snapshots * @description Get list of snapshots of the whole storage */ get: operations["list_full_snapshots"]; /** * Create storage snapshot * @description Create new snapshot of the whole storage */ post: operations["create_full_snapshot"]; }; "/snapshots/{snapshot_name}": { /** * Download storage snapshot * @description Download specified snapshot of the whole storage as a file */ get: operations["get_full_snapshot"]; /** * Delete storage snapshot * @description Delete snapshot of the whole storage */ delete: operations["delete_full_snapshot"]; }; "/collections/{collection_name}/shards/{shard_id}/snapshots/upload": { /** * Recover shard from an uploaded snapshot * @description Recover shard of a local collection from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection shard. */ post: operations["recover_shard_from_uploaded_snapshot"]; }; "/collections/{collection_name}/shards/{shard_id}/snapshots/recover": { /** * Recover from a snapshot * @description Recover shard of a local collection data from a snapshot. This will overwrite any data, stored in this shard, for the collection. 
*/ put: operations["recover_shard_from_snapshot"]; }; "/collections/{collection_name}/shards/{shard_id}/snapshots": { /** * List shards snapshots for a collection * @description Get list of snapshots for a shard of a collection */ get: operations["list_shard_snapshots"]; /** * Create shard snapshot * @description Create new snapshot of a shard for a collection */ post: operations["create_shard_snapshot"]; }; "/collections/{collection_name}/shards/{shard_id}/snapshots/{snapshot_name}": { /** * Download collection snapshot * @description Download specified snapshot of a shard from a collection as a file */ get: operations["get_shard_snapshot"]; /** * Delete shard snapshot * @description Delete snapshot of a shard for a collection */ delete: operations["delete_shard_snapshot"]; }; "/collections/{collection_name}/points/{id}": { /** * Get point * @description Retrieve full information of single point by id */ get: operations["get_point"]; }; "/collections/{collection_name}/points": { /** * Upsert points * @description Perform insert + updates on points. If point with given ID already exists - it will be overwritten. */ put: operations["upsert_points"]; /** * Get points * @description Retrieve multiple points by specified IDs */ post: operations["get_points"]; }; "/collections/{collection_name}/points/delete": { /** * Delete points * @description Delete points */ post: operations["delete_points"]; }; "/collections/{collection_name}/points/vectors": { /** * Update vectors * @description Update specified named vectors on points, keep unspecified vectors intact. */ put: operations["update_vectors"]; }; "/collections/{collection_name}/points/vectors/delete": { /** * Delete vectors * @description Delete named vectors from the given points. 
*/ post: operations["delete_vectors"]; }; "/collections/{collection_name}/points/payload": { /** * Overwrite payload * @description Replace full payload of points with new one */ put: operations["overwrite_payload"]; /** * Set payload * @description Set payload values for points */ post: operations["set_payload"]; }; "/collections/{collection_name}/points/payload/delete": { /** * Delete payload * @description Delete specified key payload for points */ post: operations["delete_payload"]; }; "/collections/{collection_name}/points/payload/clear": { /** * Clear payload * @description Remove all payload for specified points */ post: operations["clear_payload"]; }; "/collections/{collection_name}/points/batch": { /** * Batch update points * @description Apply a series of update operations for points, vectors and payloads */ post: operations["batch_update"]; }; "/collections/{collection_name}/points/scroll": { /** * Scroll points * @description Scroll request - paginate over all points which matches given filtering condition */ post: operations["scroll_points"]; }; "/collections/{collection_name}/points/search": { /** * Search points * @description Retrieve closest points based on vector similarity and given filtering conditions */ post: operations["search_points"]; }; "/collections/{collection_name}/points/search/batch": { /** * Search batch points * @description Retrieve by batch the closest points based on vector similarity and given filtering conditions */ post: operations["search_batch_points"]; }; "/collections/{collection_name}/points/search/groups": { /** * Search point groups * @description Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given payload field */ post: operations["search_point_groups"]; }; "/collections/{collection_name}/points/recommend": { /** * Recommend points * @description Look for the points which are closer to stored positive examples and at the same time further to negative examples. 
*/ post: operations["recommend_points"]; }; "/collections/{collection_name}/points/recommend/batch": { /** * Recommend batch points * @description Look for the points which are closer to stored positive examples and at the same time further to negative examples. */ post: operations["recommend_batch_points"]; }; "/collections/{collection_name}/points/recommend/groups": { /** * Recommend point groups * @description Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given payload field. */ post: operations["recommend_point_groups"]; }; "/collections/{collection_name}/points/discover": { /** * Discover points * @description Use context and a target to find the most similar points to the target, constrained by the context. * When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. * Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. * When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. */ post: operations["discover_points"]; }; "/collections/{collection_name}/points/discover/batch": { /** * Discover batch points * @description Look for points based on target and/or positive and negative example pairs, in batch. 
*/ post: operations["discover_batch_points"]; }; "/collections/{collection_name}/points/count": { /** * Count points * @description Count points which matches given filtering condition */ post: operations["count_points"]; }; "/collections/{collection_name}/points/query": { /** * Query points * @description Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. */ post: operations["query_points"]; }; "/collections/{collection_name}/points/query/batch": { /** * Query points in batch * @description Universally query points in batch. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. */ post: operations["query_batch_points"]; }; "/collections/{collection_name}/points/query/groups": { /** * Query points, grouped by a given payload field * @description Universally query points, grouped by a given payload field */ post: operations["query_points_groups"]; }; } export type webhooks = Record<string, never>; export interface components { schemas: { ErrorResponse: { /** * Format: float * @description Time spent to process this request */ time?: number; status?: { /** @description Description of the occurred error. */ error?: string; }; result?: Record<string, unknown> | null; }; /** * @example { * "collections": [ * { * "name": "arivx-title" * }, * { * "name": "arivx-abstract" * }, * { * "name": "medium-title" * }, * { * "name": "medium-text" * } * ] * } */ CollectionsResponse: { collections: (components["schemas"]["CollectionDescription"])[]; }; CollectionDescription: { name: string; }; /** @description Current statistics and configuration of the collection */ CollectionInfo: { status: components["schemas"]["CollectionStatus"]; optimizer_status: components["schemas"]["OptimizersStatus"]; /** * Format: uint * @description DEPRECATED: Approximate number of vectors in collection. 
All vectors in collection are available for querying. Calculated as `points_count x vectors_per_point`. Where `vectors_per_point` is a number of named vectors in schema. */ vectors_count?: number | null; /** * Format: uint * @description Approximate number of indexed vectors in the collection. Indexed vectors in large segments are faster to query, as it is stored in a specialized vector index. */ indexed_vectors_count?: number | null; /** * Format: uint * @description Approximate number of points (vectors + payloads) in collection. Each point could be accessed by unique id. */ points_count?: number | null; /** * Format: uint * @description Number of segments in collection. Each segment has independent vector as payload indexes */ segments_count: number; config: components["schemas"]["CollectionConfig"]; /** @description Types of stored payload */ payload_schema: { [key: string]: components["schemas"]["PayloadIndexInfo"] | undefined; }; }; /** * @description Current state of the collection. `Green` - all good. 
`Yellow` - optimization is running, `Red` - some operations failed and was not recovered * @enum {string} */ CollectionStatus: "green" | "yellow" | "grey" | "red"; /** @description Current state of the collection */ OptimizersStatus: OneOf<[ "ok", { error: string; } ]>; CollectionConfig: { params: components["schemas"]["CollectionParams"]; hnsw_config: components["schemas"]["HnswConfig"]; optimizer_config: components["schemas"]["OptimizersConfig"]; wal_config: components["schemas"]["WalConfig"]; /** @default null */ quantization_config?: components["schemas"]["QuantizationConfig"] | (Record<string, unknown> | null); }; CollectionParams: { vectors?: components["schemas"]["VectorsConfig"]; /** * Format: uint32 * @description Number of shards the collection has * @default 1 */ shard_number?: number; /** @description Sharding method Default is Auto - points are distributed across all available shards Custom - points are distributed across shards according to shard key */ sharding_method?: components["schemas"]["ShardingMethod"] | (Record<string, unknown> | null); /** * Format: uint32 * @description Number of replicas for each shard * @default 1 */ replication_factor?: number; /** * Format: uint32 * @description Defines how many replicas should apply the operation for us to consider it successful. Increasing this number will make the collection more resilient to inconsistencies, but will also make it fail if not enough replicas are available. Does not have any performance impact. * @default 1 */ write_consistency_factor?: number; /** * Format: uint32 * @description Defines how many additional replicas should be processing read request at the same time. Default value is Auto, which means that fan-out will be determined automatically based on the busyness of the local replica. Having more than 0 might be useful to smooth latency spikes of individual nodes. */ read_fan_out_factor?: number | null; /** * @description If true - point's payload will not be stored in memory. 
It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM. * @default false */ on_disk_payload?: boolean; /** @description Configuration of the sparse vector storage */ sparse_vectors?: ({ [key: string]: components["schemas"]["SparseVectorParams"] | undefined; }) | null; }; /** * @description Vector params separator for single and multiple vector modes Single mode: * * { "size": 128, "distance": "Cosine" } * * or multiple mode: * * { "default": { "size": 128, "distance": "Cosine" } } */ VectorsConfig: components["schemas"]["VectorParams"] | ({ [key: string]: components["schemas"]["VectorParams"] | undefined; }); /** @description Params of single vector data storage */ VectorParams: { /** * Format: uint64 * @description Size of a vectors used */ size: number; distance: components["schemas"]["Distance"]; /** @description Custom params for HNSW index. If none - values from collection configuration are used. */ hnsw_config?: components["schemas"]["HnswConfigDiff"] | (Record<string, unknown> | null); /** @description Custom params for quantization. If none - values from collection configuration are used. */ quantization_config?: components["schemas"]["QuantizationConfig"] | (Record<string, unknown> | null); /** @description If true, vectors are served from disk, improving RAM usage at the cost of latency Default: false */ on_disk?: boolean | null; /** * @description Defines which datatype should be used to represent vectors in the storage. Choosing different datatypes allows to optimize memory usage and performance vs accuracy. * * - For `float32` datatype - vectors are stored as single-precision floating point numbers, 4 bytes. - For `float16` datatype - vectors are stored as half-precision floating point numbers, 2 bytes. - For `uint8` datatype - vectors are stored as unsigned 8-bit integers, 1 byte. 
It expects vector elements to be in range `[0, 255]`. */ datatype?: components["schemas"]["Datatype"] | (Record<string, unknown> | null); multivector_config?: components["schemas"]["MultiVectorConfig"] | (Record<string, unknown> | null); }; /** * @description Type of internal tags, build from payload Distance function types used to compare vectors * @enum {string} */ Distance: "Cosine" | "Euclid" | "Dot" | "Manhattan"; HnswConfigDiff: { /** * Format: uint * @description Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. */ m?: number | null; /** * Format: uint * @description Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index. */ ef_construct?: number | null; /** * Format: uint * @description Minimal size (in kilobytes) of vectors for additional payload-based indexing. If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256 */ full_scan_threshold?: number | null; /** * Format: uint * @description Number of parallel threads used for background index building. If 0 - automatically select from 8 to 16. Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs. On small CPUs, less threads are used. */ max_indexing_threads?: number | null; /** @description Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false */ on_disk?: boolean | null; /** * Format: uint * @description Custom M param for additional payload-aware HNSW links. If not set, default M will be used. 
*/ payload_m?: number | null; }; QuantizationConfig: components["schemas"]["ScalarQuantization"] | components["schemas"]["ProductQuantization"] | components["schemas"]["BinaryQuantization"]; ScalarQuantization: { scalar: components["schemas"]["ScalarQuantizationConfig"]; }; ScalarQuantizationConfig: { type: components["schemas"]["ScalarType"]; /** * Format: float * @description Quantile for quantization. Expected value range in [0.5, 1.0]. If not set - use the whole range of values */ quantile?: number | null; /** @description If true - quantized vectors always will be stored in RAM, ignoring the config of main storage */ always_ram?: boolean | null; }; /** @enum {string} */ ScalarType: "int8"; ProductQuantization: { product: components["schemas"]["ProductQuantizationConfig"]; }; ProductQuantizationConfig: { compression: components["schemas"]["CompressionRatio"]; always_ram?: boolean | null; }; /** @enum {string} */ CompressionRatio: "x4" | "x8" | "x16" | "x32" | "x64"; BinaryQuantization: { binary: components["schemas"]["BinaryQuantizationConfig"]; }; BinaryQuantizationConfig: { always_ram?: boolean | null; }; /** @enum {string} */ Datatype: "float32" | "uint8" | "float16"; MultiVectorConfig: { comparator: components["schemas"]["MultiVectorComparator"]; }; /** @enum {string} */ MultiVectorComparator: "max_sim"; /** @enum {string} */ ShardingMethod: "auto" | "custom"; /** @description Params of single sparse vector data storage */ SparseVectorParams: { /** @description Custom params for index. If none - values from collection configuration are used. */ index?: components["schemas"]["SparseIndexParams"] | (Record<string, unknown> | null); /** @description Configures addition value modifications for sparse vectors. Default: none */ modifier?: components["schemas"]["Modifier"] | (Record<string, unknown> | null); }; /** @description Configuration for sparse inverted index. 
*/ SparseIndexParams: { /** * Format: uint * @description We prefer a full scan search upto (excluding) this number of vectors. * * Note: this is number of vectors, not KiloBytes. */ full_scan_threshold?: number | null; /** @description Store index on disk. If set to false, the index will be stored in RAM. Default: false */ on_disk?: boolean | null; /** * @description Defines which datatype should be used for the index. Choosing different datatypes allows to optimize memory usage and performance vs accuracy. * * - For `float32` datatype - vectors are stored as single-precision floating point numbers, 4 bytes. - For `float16` datatype - vectors are stored as half-precision floating point numbers, 2 bytes. - For `uint8` datatype - vectors are quantized to unsigned 8-bit integers, 1 byte. Quantization to fit byte range `[0, 255]` happens during indexing automatically, so the actual vector data does not need to conform to this range. */ datatype?: components["schemas"]["Datatype"] | (Record<string, unknown> | null); }; /** * @description If used, include weight modification, which will be applied to sparse vectors at query time: None - no modification (default) Idf - inverse document frequency, based on statistics of the collection * @enum {string} */ Modifier: "none" | "idf"; /** @description Config of HNSW index */ HnswConfig: { /** * Format: uint * @description Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. */ m: number; /** * Format: uint * @description Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build index. */ ef_construct: number; /** * Format: uint * @description Minimal size (in KiloBytes) of vectors for additional payload-based indexing. 
If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256 */ full_scan_threshold: number; /** * Format: uint * @description Number of parallel threads used for background index building. If 0 - automatically select from 8 to 16. Best to keep between 8 and 16 to prevent likelihood of slow building or broken/inefficient HNSW graphs. On small CPUs, less threads are used. * @default 0 */ max_indexing_threads?: number; /** @description Store HNSW index on disk. If set to false, index will be stored in RAM. Default: false */ on_disk?: boolean | null; /** * Format: uint * @description Custom M param for hnsw graph built for payload index. If not set, default M will be used. */ payload_m?: number | null; }; OptimizersConfig: { /** * Format: double * @description The minimal fraction of deleted vectors in a segment, required to perform segment optimization */ deleted_threshold: number; /** * Format: uint * @description The minimal number of vectors in a segment, required to perform segment optimization */ vacuum_min_vector_number: number; /** * Format: uint * @description Target amount of segments optimizer will try to keep. Real amount of segments may vary depending on multiple parameters: - Amount of stored points - Current write RPS * * It is recommended to select default number of segments as a factor of the number of search threads, so that each segment would be handled evenly by one of the threads. If `default_segment_number = 0`, will be automatically selected by the number of available CPUs. */ default_segment_number: number; /** * Format: uint * @description Do not create segments larger this size (in kilobytes). Large segments might require disproportionately long indexation times, therefore it makes sense to limit the size of segments. 
* * If indexing speed is more important - make this parameter lower. If search speed is more important - make this parameter higher. Note: 1Kb = 1 vector of size 256 If not set, will be automatically selected considering the number of available CPUs. * @default null */ max_segment_size?: number | null; /** * Format: uint * @description Maximum size (in kilobytes) of vectors to store in-memory per segment. Segments larger than this threshold will be stored as read-only memmaped file. * * Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value. * * To disable memmap storage, set this to `0`. Internally it will use the largest threshold possible. * * Note: 1Kb = 1 vector of size 256 * @default null */ memmap_threshold?: number | null; /** * Format: uint * @description Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing * * Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>. * * To disable vector indexing, set to `0`. * * Note: 1kB = 1 vector of size 256. * @default null */ indexing_threshold?: number | null; /** * Format: uint64 * @description Minimum interval between forced flushes. */ flush_interval_sec: number; /** * Format: uint * @description Max number of threads (jobs) for running optimizations per shard. Note: each optimization job will also use `max_indexing_threads` threads by itself for index building. If null - have no limit and choose dynamically to saturate CPU. If 0 - no optimization threads, optimizations will be disabled. 
* @default null */ max_optimization_threads?: number | null; }; WalConfig: { /** * Format: uint * @description Size of a single WAL segment in MB */ wal_capacity_mb: number; /** * Format: uint * @description Number of WAL segments to create ahead of actually used ones */ wal_segments_ahead: number; }; /** @description Display payload field type & index information */ PayloadIndexInfo: { data_type: components["schemas"]["PayloadSchemaType"]; params?: components["schemas"]["PayloadSchemaParams"] | (Record<string, unknown> | null); /** * Format: uint * @description Number of points indexed with this index */ points: number; }; /** * @description All possible names of payload types * @enum {string} */ PayloadSchemaType: "keyword" | "integer" | "float" | "geo" | "text" | "bool" | "datetime" | "uuid"; /** @description Payload type with parameters */ PayloadSchemaParams: components["schemas"]["KeywordIndexParams"] | components["schemas"]["IntegerIndexParams"] | components["schemas"]["FloatIndexParams"] | components["schemas"]["GeoIndexParams"] | components["schemas"]["TextIndexParams"] | components["schemas"]["BoolIndexParams"] | components["schemas"]["DatetimeIndexParams"] | components["schemas"]["UuidIndexParams"]; KeywordIndexParams: { type: components["schemas"]["KeywordIndexType"]; /** @description If true - used for tenant optimization. Default: false. */ is_tenant?: boolean | null; /** @description If true, store the index on disk. Default: false. */ on_disk?: boolean | null; }; /** @enum {string} */ KeywordIndexType: "keyword"; IntegerIndexParams: { type: components["schemas"]["IntegerIndexType"]; /** @description If true - support direct lookups. */ lookup: boolean; /** @description If true - support ranges filters. */ range: boolean; /** @description If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. 
*/ is_principal?: boolean | null; /** @description If true, store the index on disk. Default: false. */ on_disk?: boolean | null; }; /** @enum {string} */ IntegerIndexType: "integer"; FloatIndexParams: { type: components["schemas"]["FloatIndexType"]; /** @description If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. */ is_principal?: boolean | null; /** @description If true, store the index on disk. Default: false. */ on_disk?: boolean | null; }; /** @enum {string} */ FloatIndexType: "float"; GeoIndexParams: { type: components["schemas"]["GeoIndexType"]; }; /** @enum {string} */ GeoIndexType: "geo"; TextIndexParams: { type: components["schemas"]["TextIndexType"]; tokenizer?: components["schemas"]["TokenizerType"]; /** Format: uint */ min_token_len?: number | null; /** Format: uint */ max_token_len?: number | null; /** @description If true, lowercase all tokens. Default: true. */ lowercase?: boolean | null; }; /** @enum {string} */ TextIndexType: "text"; /** @enum {string} */ TokenizerType: "prefix" | "whitespace" | "word" | "multilingual"; BoolIndexParams: { type: components["schemas"]["BoolIndexType"]; }; /** @enum {string} */ BoolIndexType: "bool"; DatetimeIndexParams: { type: components["schemas"]["DatetimeIndexType"]; /** @description If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. */ is_principal?: boolean | null; /** @description If true, store the index on disk. Default: false. */ on_disk?: boolean | null; }; /** @enum {string} */ DatetimeIndexType: "datetime"; UuidIndexParams: { type: components["schemas"]["UuidIndexType"]; /** @description If true - used for tenant optimization. */ is_tenant?: boolean | null; /** @description If true, store the index on disk. Default: false. 
*/ on_disk?: boolean | null; }; /** @enum {string} */ UuidIndexType: "uuid"; PointRequest: { /** @description Specify in which shards to look for the points, if not specified - look in all shards */ shard_key?: components["schemas"]["ShardKeySelector"] | (Record<string, unknown> | null); /** @description Look for points with ids */ ids: (components["schemas"]["ExtendedPointId"])[]; /** @description Select which payload to return with the response. Default is true. */ with_payload?: components["schemas"]["WithPayloadInterface"] | (Record<string, unknown> | null); with_vector?: components["schemas"]["WithVector"]; }; ShardKeySelector: components["schemas"]["ShardKey"] | (components["schemas"]["ShardKey"])[]; ShardKey: string | number; /** @description Type, used for specifying point ID in user interface */ ExtendedPointId: number | string; /** @description Options for specifying which payload to include or not */ WithPayloadInterface: boolean | (string)[] | components["schemas"]["PayloadSelector"]; /** @description Specifies how to treat payload selector */ PayloadSelector: components["schemas"]["PayloadSelectorInclude"] | components["schemas"]["PayloadSelectorExclude"]; PayloadSelectorInclude: { /** @description Only include this payload keys */ include: (string)[]; }; PayloadSelectorExclude: { /** @description Exclude this fields from returning payload */ exclude: (string)[]; }; /** @description Options for specifying which vector to include */ WithVector: boolean | (string)[]; /** @description Point data */ Record: { id: components["schemas"]["ExtendedPointId"]; /** @description Payload - values assigned to the point */ payload?: components["schemas"]["Payload"] | (Record<string, unknown> | null); /** @description Vector of the point */ vector?: components["schemas"]["VectorStruct"] | (Record<string, unknown> | null); /** @description Shard Key */ shard_key?: components["schemas"]["ShardKey"] | (Record<string, unknown> | null); order_value?: 
components["schemas"]["OrderValue"] | (Record<string, unknown> | null); }; /** * @example { * "city": "London", * "color": "green" * } */ Payload: { [key: string]: unknown; }; /** @description Full vector data per point separator with single and multiple vector modes */ VectorStruct: (number)[] | ((number)[])[] | ({ [key: string]: components["schemas"]["Vector"] | undefined; }); Vector: (number)[] | components["schemas"]["SparseVector"] | ((number)[])[]; /** @description Sparse vector structure */ SparseVector: { /** @description Indices must be unique */ indices: (number)[]; /** @description Values and indices must be the same length */ values: (number)[]; }; OrderValue: number; /** @description Search request. Holds all conditions and parameters for the search of most similar points by vector similarity given the filtering restrictions. */ SearchRequest: { /** @description Specify in which shards to look for the points, if not specified - look in all shards */ shard_key?: components["schemas"]["ShardKeySelector"] | (Record<string, unknown> | null); vector: components["schemas"]["NamedVectorStruct"]; /** @description Look only for points which satisfies this conditions */ filter?: components["schemas"]["Filter"] | (Record<string, unknown> | null); /** @description Additional search params */ params?: components["schemas"]["SearchParams"] | (Record<string, unknown> | null); /** * Format: uint * @description Max number of result to return */ limit: number; /** * Format: uint * @description Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. */ offset?: number | null; /** @description Select which payload to return with the response. Default is false. */ with_payload?: components["schemas"]["WithPayloadInterface"] | (Record<string, unknown> | null); /** * @description Options for specifying which vectors to include into response. Default is false. 
* @default null */ with_vector?: components["schemas"]["WithVector"] | (Record<string, unknown> | null); /** * Format: float * @description Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. */ score_threshold?: number | null; }; /** * @description Vector data separator for named and unnamed modes Unnamed mode: * * { "vector": [1.0, 2.0, 3.0] } * * or named mode: * * { "vector": { "vector": [1.0, 2.0, 3.0], "name": "image-embeddings" } } */ NamedVectorStruct: (number)[] | components["schemas"]["NamedVector"] | components["schemas"]["NamedSparseVector"]; /** @description Dense vector data with name */ NamedVector: { /** @description Name of vector data */ name: string; /** @description Vector data */ vector: (number)[]; }; /** @description Sparse vector data with name */ NamedSparseVector: { /** @description Name of vector data */ name: string; vector: components["schemas"]["SparseVector"]; }; Filter: { /** * @description At least one of those conditions should match * @default null */ should?: components["schemas"]["Condition"] | (components["schemas"]["Condition"])[] | (Record<string, unknown> | null); /** @description At least minimum amount of given conditions should match */ min_should?: components["schemas"]["MinShould"] | (Record<string, unknown> | null); /** * @description All conditions must match * @default null */ must?: components["schemas"]["Condition"] | (components["schemas"]["Condition"])[] | (Record<string, unknown> | null); /** * @description All conditions must NOT match * @default null */ must_not?: components["schemas"]["Condition"] | (components["schemas"]["Condition"])[] | (Record<string, unknown> | null); }; Condition: components["schemas"]["FieldCondition"] | components["schemas"]["IsEmptyCondition"] | 
components["schemas"]["IsNullCondition"] | components["schemas"]["HasIdCondition"] | components["schemas"]["NestedCondition"] | components["schemas"]["Filter"]; /** @description All possible payload filtering conditions */ FieldCondition: { /** @description Payload key */ key: string; /** @description Check if point has field with a given value */ match?: components["schemas"]["Match"] | (Record<string, unknown> | null); /** @description Check if points value lies in a given range */ range?: components["schemas"]["RangeInterface"] | (Record<string, unknown> | null); /** @description Check if points geo location lies in a given area */ geo_bounding_box?: components["schemas"]["GeoBoundingBox"] | (Record<string, unknown> | null); /** @description Check if geo point is within a given radius */ geo_radius?: components["schemas"]["GeoRadius"] | (Record<string, unknown> | null); /** @description Check if geo point is within a given polygon */ geo_polygon?: components["schemas"]["GeoPolygon"] | (Record<string, unknown> | null); /** @description Check number of values of the field */ values_count?: components["schemas"]["ValuesCount"] | (Record<stri