// react-native-executorch
// An easy way to run AI models in React Native with ExecuTorch
;
import { CocoLabel } from '../constants/commonVision';
export { CocoLabel };
/**
* Represents a bounding box for a detected object in an image.
* @category Types
* @property {number} x1 - The x-coordinate of the top-left corner of the bounding box.
* @property {number} y1 - The y-coordinate of the top-left corner of the bounding box.
* @property {number} x2 - The x-coordinate of the bottom-right corner of the bounding box.
* @property {number} y2 - The y-coordinate of the bottom-right corner of the bounding box.
*/
/**
* Represents a detected object within an image, including its bounding box, label, and confidence score.
* @category Types
* @typeParam L - The label enum type for the detected object. Defaults to {@link CocoLabel}.
* @property {Bbox} bbox - The bounding box of the detected object, defined by its top-left (x1, y1) and bottom-right (x2, y2) coordinates.
* @property {keyof L} label - The class label of the detected object.
* @property {number} score - The confidence score of the detection, typically ranging from 0 to 1.
*/
/**
* Options for configuring object detection inference.
* @category Types
* @typeParam L - The label enum type for filtering classes of interest.
* @property {number} [detectionThreshold] - Minimum confidence score for detections (0-1). Defaults to model-specific value.
* @property {number} [iouThreshold] - IoU threshold for non-maximum suppression (0-1). Defaults to model-specific value.
* @property {number} [inputSize] - Input size for multi-method models (e.g., 384, 512, 640 for YOLO). Required for YOLO models if not using default.
* @property {(keyof L)[]} [classesOfInterest] - Optional array of class labels to filter detections. Only detections matching these classes will be returned.
*/
/**
* Per-model config for {@link ObjectDetectionModule.fromModelName}.
* Each model name maps to its required fields.
* @category Types
*/
/**
* Union of all built-in object detection model names.
* @category Types
*/
/**
* Configuration for a custom object detection model.
* @category Types
* @typeParam T - The label enum type for the model.
* @property {T} labelMap - The label mapping for the model.
* @property {object} [preprocessorConfig] - Optional preprocessing configuration with normalization parameters.
* @property {number} [defaultDetectionThreshold] - Default detection confidence threshold (0-1).
* @property {number} [defaultIouThreshold] - Default IoU threshold for non-maximum suppression (0-1).
* @property {readonly number[]} [availableInputSizes] - For multi-method models, the available input sizes (e.g., [384, 512, 640]).
* @property {number} [defaultInputSize] - For multi-method models, the default input size to use.
*/
/**
* Props for the `useObjectDetection` hook.
* @typeParam C - A {@link ObjectDetectionModelSources} config specifying which built-in model to load.
* @category Types
* @property model - The model config containing `modelName` and `modelSource`.
 * @property {boolean} [preventLoad] - If true, prevents the hook from automatically loading the model (and downloading its data on first use) when it runs.
*/
/**
* Return type for the `useObjectDetection` hook.
* Manages the state and operations for Computer Vision object detection tasks.
* @typeParam L - The {@link LabelEnum} representing the model's class labels.
* @category Types
*/
//# sourceMappingURL=objectDetection.js.map