// @anpanman/opencv_ts
// Experimental WIP TypeScript typings and OpenCV.js/wasm loader.
import { int, float, double } from '../core/_types'
import { Mat } from '../core/Mat'
import { IntVector, FloatVector, PointVector, MatVector, RectVector, KeyPointVector, DMatchVector, DMatchVectorVector } from '../core/vectors'
import { DrawMatchesFlags } from './enums'
import { SizeLike, PointLike, Point2fLike, RectLike, TermCriteriaLike, ScalarLike, RotatedRectLike, MomentsLike } from '../core/valueObjects'
/**
* @brief Finds edges in an image using the Canny algorithm @cite Canny86 .
*
* The function finds edges in the input image and marks them in the output map edges using the
* Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
* largest value is used to find initial segments of strong edges. See
* <http://en.wikipedia.org/wiki/Canny_edge_detector>
*
* @param image 8-bit input image.
 * @param edges output edge map; single-channel 8-bit image, which has the same size as image.
* @param threshold1 first threshold for the hysteresis procedure.
* @param threshold2 second threshold for the hysteresis procedure.
* @param apertureSize aperture size for the Sobel operator.
* @param L2gradient a flag, indicating whether a more accurate \f$L_2\f$ norm
* \f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to calculate the image gradient magnitude (
* L2gradient=true ), or whether the default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough (
* L2gradient=false ).
*/
export function Canny(image: Mat, edges: Mat, threshold1: double, threshold2: double, apertureSize?: int, L2gradient?: boolean): void
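/*
 * Usage sketch (illustration only, not part of the upstream docs): assuming
 * the wasm module has finished loading and the bindings are reachable through
 * a `cv` namespace object, with `src` an 8-bit grayscale Mat:
 *
 *   const edges = new cv.Mat();
 *   cv.Canny(src, edges, 50, 150);  // 50/150 = hysteresis thresholds
 *   // ... consume edges, then release the wasm-side memory
 *   edges.delete();
 */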
/**
* \overload
*
* Finds edges in an image using the Canny algorithm with custom image gradient.
*
* @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
* @param dy 16-bit y derivative of input image (same type as dx).
 * @param edges output edge map; single-channel 8-bit image, which has the same size as image.
* @param threshold1 first threshold for the hysteresis procedure.
* @param threshold2 second threshold for the hysteresis procedure.
* @param L2gradient a flag, indicating whether a more accurate \f$L_2\f$ norm
* \f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to calculate the image gradient magnitude (
* L2gradient=true ), or whether the default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough (
* L2gradient=false ).
*/
export function Canny(dx: Mat, dy: Mat, edges: Mat, threshold1: double, threshold2: double, L2gradient?: boolean): void
/**
* @brief Blurs an image using a Gaussian filter.
*
* The function convolves the source image with the specified Gaussian kernel. In-place filtering is
* supported.
*
* @param src input image; the image can have any number of channels, which are processed
* independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src.
* @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
 * positive and odd. Or, they can be zeros, in which case they are computed from sigma.
* @param sigmaX Gaussian kernel standard deviation in X direction.
* @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
* equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
* respectively (see #getGaussianKernel for details); to fully control the result regardless of
* possible future modifications of all this semantics, it is recommended to specify all of ksize,
* sigmaX, and sigmaY.
* @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
*
* @sa sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
*/
export function GaussianBlur(src: Mat, dst: Mat, ksize: SizeLike, sigmaX: double, sigmaY?: double, borderType?: int): void
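/*
 * Usage sketch (illustration only). The plain {width, height} object is an
 * assumption about the SizeLike value-object shape; stock OpenCV.js would use
 * `new cv.Size(5, 5)` instead:
 *
 *   const dst = new cv.Mat();
 *   cv.GaussianBlur(src, dst, { width: 5, height: 5 }, 1.5, 1.5, cv.BORDER_DEFAULT);
 *   // sigmaY is passed explicitly (equal to sigmaX) to pin down the result,
 *   // as the note above recommends.
 *   dst.delete();
 */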
/**
* @brief Finds circles in a grayscale image using the Hough transform.
*
* The function finds circles in a grayscale image using a modification of the Hough transform.
*
 * Example:
* @include snippets/imgproc_HoughLinesCircles.cpp
*
* @note Usually the function detects the centers of circles well. However, it may fail to find correct
 * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
* you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
* to return centers only without radius search, and find the correct radius using an additional procedure.
*
 * It also helps to smooth the image a bit unless it is already soft. For example,
* GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
*
* @param image 8-bit, single-channel, grayscale input image.
* @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element
* floating-point vector \f$(x, y, radius)\f$ or \f$(x, y, radius, votes)\f$ .
* @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
* @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
* dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
 * half the width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
 * unless some very small circles need to be detected.
* @param minDist Minimum distance between the centers of the detected circles. If the parameter is
* too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
* too large, some circles may be missed.
* @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is half as large).
 * Note that #HOUGH_GRADIENT_ALT uses the #Scharr algorithm to compute image derivatives, so the threshold value
 * should normally be higher, such as 300, for normally exposed and contrasty images.
* @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
* accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
* false circles may be detected. Circles, corresponding to the larger accumulator values, will be
* returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
 * The closer it is to 1, the better-shaped the circles the algorithm selects. In most cases 0.9 should be fine.
 * If you want better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
* But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
* @param minRadius Minimum circle radius.
* @param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, #HOUGH_GRADIENT returns
 * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radii.
*
* @sa fitEllipse, minEnclosingCircle
*/
export function HoughCircles(image: Mat, circles: Mat, method: int, dp: double, minDist: double, param1?: double, param2?: double, minRadius?: int, maxRadius?: int): void
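/*
 * Usage sketch (illustration only): reading detections back out of the output
 * Mat, which holds one 3-element float entry per circle. `gray` is assumed to
 * be an 8-bit single-channel image, ideally pre-smoothed as suggested above:
 *
 *   const circles = new cv.Mat();
 *   cv.HoughCircles(gray, circles, cv.HOUGH_GRADIENT, 1, 45, 75, 40, 0, 0);
 *   for (let i = 0; i < circles.cols; i++) {
 *     const x = circles.data32F[i * 3];      // center x
 *     const y = circles.data32F[i * 3 + 1];  // center y
 *     const r = circles.data32F[i * 3 + 2];  // radius
 *   }
 *   circles.delete();
 */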
/**
* @brief Finds lines in a binary image using the standard Hough transform.
*
* The function implements the standard or standard multi-scale Hough transform algorithm for line
* detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough
* transform.
*
* @param image 8-bit, single-channel binary source image. The image may be modified by the function.
* @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
* \f$(\rho, \theta)\f$ or \f$(\rho, \theta, \textrm{votes})\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of
* the image). \f$\theta\f$ is the line rotation angle in radians (
* \f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ).
 * \f$\textrm{votes}\f$ is the value of the accumulator.
* @param rho Distance resolution of the accumulator in pixels.
* @param theta Angle resolution of the accumulator in radians.
* @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
* votes ( \f$>\texttt{threshold}\f$ ).
* @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
* The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
* rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
* parameters should be positive.
* @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
* @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
* Must fall between 0 and max_theta.
* @param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.
* Must fall between min_theta and CV_PI.
*/
export function HoughLines(image: Mat, lines: Mat, rho: double, theta: double, threshold: int, srn?: double, stn?: double, min_theta?: double, max_theta?: double): void
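/*
 * Usage sketch (illustration only). `edges` would typically be a binary Canny
 * output; each detected line comes back as a (rho, theta) float pair:
 *
 *   const lines = new cv.Mat();
 *   cv.HoughLines(edges, lines, 1, Math.PI / 180, 120);
 *   for (let i = 0; i < lines.rows; i++) {
 *     const rho = lines.data32F[i * 2];
 *     const theta = lines.data32F[i * 2 + 1];
 *   }
 *   lines.delete();
 */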
/**
* @brief Finds line segments in a binary image using the probabilistic Hough transform.
*
* The function implements the probabilistic Hough transform algorithm for line detection, described
* in @cite Matas00
*
* See the line detection example below:
* @include snippets/imgproc_HoughLinesP.cpp
* This is a sample picture the function parameters have been tuned for:
*
* 
*
* And this is the output of the above program in case of the probabilistic Hough transform:
*
* 
*
* @param image 8-bit, single-channel binary source image. The image may be modified by the function.
* @param lines Output vector of lines. Each line is represented by a 4-element vector
* \f$(x_1, y_1, x_2, y_2)\f$ , where \f$(x_1,y_1)\f$ and \f$(x_2, y_2)\f$ are the ending points of each detected
* line segment.
* @param rho Distance resolution of the accumulator in pixels.
* @param theta Angle resolution of the accumulator in radians.
* @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
* votes ( \f$>\texttt{threshold}\f$ ).
* @param minLineLength Minimum line length. Line segments shorter than that are rejected.
* @param maxLineGap Maximum allowed gap between points on the same line to link them.
*
* @sa LineSegmentDetector
*/
export function HoughLinesP(image: Mat, lines: Mat, rho: double, theta: double, threshold: int, minLineLength?: double, maxLineGap?: double): void
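/*
 * Usage sketch (illustration only): segments come back as (x1, y1, x2, y2)
 * 32-bit integer quadruples:
 *
 *   const lines = new cv.Mat();
 *   cv.HoughLinesP(edges, lines, 1, Math.PI / 180, 50, 30, 10);
 *   for (let i = 0; i < lines.rows; i++) {
 *     const x1 = lines.data32S[i * 4];
 *     const y1 = lines.data32S[i * 4 + 1];
 *     const x2 = lines.data32S[i * 4 + 2];
 *     const y2 = lines.data32S[i * 4 + 3];
 *   }
 *   lines.delete();
 */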
/**
* @brief Calculates the Laplacian of an image.
*
* The function calculates the Laplacian of the source image by adding up the second x and y
* derivatives calculated using the Sobel operator:
*
* \f[\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\f]
*
* This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
* with the following \f$3 \times 3\f$ aperture:
*
* \f[\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\f]
*
* @param src Source image.
* @param dst Destination image of the same size and the same number of channels as src .
* @param ddepth Desired depth of the destination image.
* @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
* details. The size must be positive and odd.
* @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
* applied. See #getDerivKernels for details.
* @param delta Optional delta value that is added to the results prior to storing them in dst .
* @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
* @sa Sobel, Scharr
*/
export function Laplacian(src: Mat, dst: Mat, ddepth: int, ksize?: int, scale?: double, delta?: double, borderType?: int): void
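/*
 * Usage sketch (illustration only). A signed output depth such as CV_16S
 * keeps the negative second-derivative responses that CV_8U would truncate:
 *
 *   const lap = new cv.Mat();
 *   cv.Laplacian(gray, lap, cv.CV_16S, 3, 1, 0, cv.BORDER_DEFAULT);
 *   lap.delete();
 */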
/**
* @brief Converts a rotation matrix to a rotation vector or vice versa.
*
* @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
* @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
* @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
* derivatives of the output array components with respect to the input array components.
*
* \f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f]
*
* Inverse transformation can be also done easily, since
*
* \f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f]
*
 * A rotation vector is a convenient, minimal representation of a rotation matrix (since any
* rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
* optimization procedures like @ref calibrateCamera, @ref stereoCalibrate, or @ref solvePnP .
*
* @note More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
* can be found in:
* - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi @cite Gallego2014ACF
*
* @note Useful information on SE(3) and Lie Groups can be found in:
* - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco @cite blanco2010tutorial
* - Lie Groups for 2D and 3D Transformation, Ethan Eade @cite Eade17
 * - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan @cite Sol2018AML
*/
export function Rodrigues(src: Mat, dst: Mat, jacobian?: Mat): void
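/*
 * Usage sketch (illustration only): converting an axis-angle vector (here a
 * 90-degree rotation about the z axis) into its 3x3 matrix form:
 *
 *   const rvec = cv.matFromArray(3, 1, cv.CV_64F, [0, 0, Math.PI / 2]);
 *   const R = new cv.Mat();
 *   cv.Rodrigues(rvec, R);  // R is now a 3x3 CV_64F rotation matrix
 *   rvec.delete(); R.delete();
 */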
/**
* @brief Calculates the first x- or y- image derivative using Scharr operator.
*
* The function computes the first x- or y- spatial image derivative using the Scharr operator. The
* call
*
* \f[\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\f]
*
* is equivalent to
*
* \f[\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\f]
*
* @param src input image.
* @param dst output image of the same size and the same number of channels as src.
* @param ddepth output image depth, see @ref filter_depths "combinations"
* @param dx order of the derivative x.
* @param dy order of the derivative y.
* @param scale optional scale factor for the computed derivative values; by default, no scaling is
* applied (see #getDerivKernels for details).
* @param delta optional delta value that is added to the results prior to storing them in dst.
* @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
* @sa cartToPolar
*/
export function Scharr(src: Mat, dst: Mat, ddepth: int, dx: int, dy: int, scale?: double, delta?: double, borderType?: int): void
/**
* @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
*
* In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to
* calculate the derivative. When \f$\texttt{ksize = 1}\f$, the \f$3 \times 1\f$ or \f$1 \times 3\f$
* kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
* or the second x- or y- derivatives.
*
* There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the \f$3\times3\f$ Scharr
* filter that may give more accurate results than the \f$3\times3\f$ Sobel. The Scharr aperture is
*
* \f[\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\f]
*
* for the x-derivative, or transposed for the y-derivative.
*
* The function calculates an image derivative by convolving the image with the appropriate kernel:
*
* \f[\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\f]
*
* The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
* resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
* or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
* case corresponds to a kernel of:
*
* \f[\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\f]
*
* The second case corresponds to a kernel of:
*
* \f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f]
*
* @param src input image.
* @param dst output image of the same size and the same number of channels as src .
* @param ddepth output image depth, see @ref filter_depths "combinations"; in the case of
* 8-bit input images it will result in truncated derivatives.
* @param dx order of the derivative x.
* @param dy order of the derivative y.
* @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
* @param scale optional scale factor for the computed derivative values; by default, no scaling is
* applied (see #getDerivKernels for details).
* @param delta optional delta value that is added to the results prior to storing them in dst.
* @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
* @sa Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
*/
export function Sobel(src: Mat, dst: Mat, ddepth: int, dx: int, dy: int, ksize?: int, scale?: double, delta?: double, borderType?: int): void
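/*
 * Usage sketch (illustration only): first x- and y-derivatives with the
 * common 3x3 kernel, again using a signed depth so negative gradients
 * survive:
 *
 *   const gx = new cv.Mat(), gy = new cv.Mat();
 *   cv.Sobel(gray, gx, cv.CV_16S, 1, 0, 3);  // d/dx
 *   cv.Sobel(gray, gy, cv.CV_16S, 0, 1, 3);  // d/dy
 *   gx.delete(); gy.delete();
 */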
/**
* @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.
*
* The function cv::absdiff calculates:
* Absolute difference between two arrays when they have the same
* size and type:
* \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2}(I)|)\f]
* Absolute difference between an array and a scalar when the second
* array is constructed from Scalar or has as many elements as the
* number of channels in `src1`:
* \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2} |)\f]
* Absolute difference between a scalar and an array when the first
* array is constructed from Scalar or has as many elements as the
* number of channels in `src2`:
* \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1} - \texttt{src2}(I) |)\f]
* where I is a multi-dimensional index of array elements. In case of
* multi-channel arrays, each channel is processed independently.
* @note Saturation is not applied when the arrays have the depth CV_32S.
* You may even get a negative value in the case of overflow.
* @param src1 first input array or a scalar.
* @param src2 second input array or a scalar.
* @param dst output array that has the same size and type as input arrays.
* @sa cv::abs(const Mat&)
*/
export function absdiff(src1: Mat, src2: Mat, dst: Mat): void
/**
* @brief Applies an adaptive threshold to an array.
*
* The function transforms a grayscale image to a binary image according to the formulae:
* - **THRESH_BINARY**
* \f[dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}\f]
* - **THRESH_BINARY_INV**
* \f[dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}\f]
* where \f$T(x,y)\f$ is a threshold calculated individually for each pixel (see adaptiveMethod parameter).
*
* The function can process the image in-place.
*
* @param src Source 8-bit single-channel image.
* @param dst Destination image of the same size and the same type as src.
* @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied
* @param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes.
 * #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries.
* @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV,
* see #ThresholdTypes.
* @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the
* pixel: 3, 5, 7, and so on.
* @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it
* is positive but may be zero or negative as well.
*
* @sa threshold, blur, GaussianBlur
*/
export function adaptiveThreshold(src: Mat, dst: Mat, maxValue: double, adaptiveMethod: int, thresholdType: int, blockSize: int, C: double): void
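/*
 * Usage sketch (illustration only): binarizing unevenly lit text with a
 * Gaussian-weighted local threshold (blockSize 11 and C = 2 are typical
 * starting values, not prescriptions):
 *
 *   const bin = new cv.Mat();
 *   cv.adaptiveThreshold(gray, bin, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
 *                        cv.THRESH_BINARY, 11, 2);
 *   bin.delete();
 */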
/**
* @brief Calculates the per-element sum of two arrays or an array and a scalar.
*
* The function add calculates:
* - Sum of two arrays when both input arrays have the same size and the same number of channels:
* \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f]
* - Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of
* elements as `src1.channels()`:
* \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f]
* - Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of
* elements as `src2.channels()`:
* \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} + \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f]
* where `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each
* channel is processed independently.
*
* The first function in the list above can be replaced with matrix expressions:
* @code{.cpp}
* dst = src1 + src2;
* dst += src1; // equivalent to add(dst, src1, dst);
* @endcode
* The input arrays and the output array can all have the same or different depths. For example, you
 * can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit
* floating-point array. Depth of the output array is determined by the dtype parameter. In the second
* and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can
* be set to the default -1. In this case, the output array will have the same depth as the input
* array, be it src1, src2 or both.
* @note Saturation is not applied when the output array has the depth CV_32S. You may even get
* result of an incorrect sign in the case of overflow.
* @param src1 first input array or a scalar.
* @param src2 second input array or a scalar.
* @param dst output array that has the same size and number of channels as the input array(s); the
* depth is defined by dtype or src1/src2.
* @param mask optional operation mask - 8-bit single channel array, that specifies elements of the
* output array to be changed.
* @param dtype optional depth of the output array (see the discussion below).
* @sa subtract, addWeighted, scaleAdd, Mat::convertTo
*/
export function add(src1: Mat, src2: Mat, dst: Mat, mask?: Mat, dtype?: int): void
/**
* @brief Calculates the weighted sum of two arrays.
*
* The function addWeighted calculates the weighted sum of two arrays as follows:
* \f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} + \texttt{src2} (I)* \texttt{beta} + \texttt{gamma} )\f]
* where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each
* channel is processed independently.
* The function can be replaced with a matrix expression:
* @code{.cpp}
* dst = src1*alpha + src2*beta + gamma;
* @endcode
* @note Saturation is not applied when the output array has the depth CV_32S. You may even get
* result of an incorrect sign in the case of overflow.
* @param src1 first input array.
* @param alpha weight of the first array elements.
* @param src2 second input array of the same size and channel number as src1.
* @param beta weight of the second array elements.
* @param gamma scalar added to each sum.
* @param dst output array that has the same size and number of channels as the input arrays.
* @param dtype optional depth of the output array; when both input arrays have the same depth, dtype
* can be set to -1, which will be equivalent to src1.depth().
* @sa add, subtract, scaleAdd, Mat::convertTo
*/
export function addWeighted(src1: Mat, alpha: double, src2: Mat, beta: double, gamma: double, dst: Mat, dtype?: int): void
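/*
 * Usage sketch (illustration only): a 70/30 cross-fade of two images of the
 * same size and type:
 *
 *   const blended = new cv.Mat();
 *   cv.addWeighted(imgA, 0.7, imgB, 0.3, 0, blended, -1);
 *   blended.delete();
 */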
/**
* @brief Approximates a polygonal curve(s) with the specified precision.
*
 * The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with fewer
 * vertices so that the distance between them is less than or equal to the specified precision. It uses the
* Douglas-Peucker algorithm <http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm>
*
 * @param curve Input vector of 2D points, stored in std::vector or Mat.
* @param approxCurve Result of the approximation. The type should match the type of the input curve.
* @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
* between the original curve and its approximation.
* @param closed If true, the approximated curve is closed (its first and last vertices are
* connected). Otherwise, it is not closed.
*/
export function approxPolyDP(curve: Mat, approxCurve: Mat, epsilon: double, closed: boolean): void
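/*
 * Usage sketch (illustration only): the common idiom of deriving epsilon from
 * the contour perimeter via arcLength (declared just below), so the tolerance
 * scales with the shape:
 *
 *   const approx = new cv.Mat();
 *   const eps = 0.01 * cv.arcLength(contour, true);  // 1% of the perimeter
 *   cv.approxPolyDP(contour, approx, eps, true);
 *   approx.delete();
 */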
/**
* @brief Calculates a contour perimeter or a curve length.
*
* The function computes a curve length or a closed contour perimeter.
*
* @param curve Input vector of 2D points, stored in std::vector or Mat.
* @param closed Flag indicating whether the curve is closed or not.
*/
export function arcLength(curve: Mat, closed: boolean): double
/**
* @brief Applies the bilateral filter to an image.
*
* The function applies bilateral filtering to the input image, as described in
* http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
* bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
* very slow compared to most filters.
*
* _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\<
* 10), the filter will not have much effect, whereas if they are large (\> 150), they will have a very
* strong effect, making the image look "cartoonish".
*
* _Filter size_: Large filters (d \> 5) are very slow, so it is recommended to use d=5 for real-time
* applications, and perhaps d=9 for offline applications that need heavy noise filtering.
*
 * This filter does not work in place.
* @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
* @param dst Destination image of the same size and type as src .
* @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
* it is computed from sigmaSpace.
* @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
* farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
* in larger areas of semi-equal color.
* @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
* farther pixels will influence each other as long as their colors are close enough (see sigmaColor
* ). When d\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
* proportional to sigmaSpace.
* @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
*/
export function bilateralFilter(src: Mat, dst: Mat, d: int, sigmaColor: double, sigmaSpace: double, borderType?: int): void
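/*
 * Usage sketch (illustration only). Per the docs above, src must be 1- or
 * 3-channel (convert RGBA to RGB first) and dst must be a different Mat:
 *
 *   const smoothed = new cv.Mat();
 *   cv.bilateralFilter(rgb, smoothed, 9, 75, 75, cv.BORDER_DEFAULT);
 *   smoothed.delete();
 */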
/**
* @brief computes bitwise conjunction of the two arrays (dst = src1 & src2)
* Calculates the per-element bit-wise conjunction of two arrays or an
* array and a scalar.
*
* The function cv::bitwise_and calculates the per-element bit-wise logical conjunction for:
* Two arrays when src1 and src2 have the same size:
* \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* An array and a scalar when src2 is constructed from Scalar or has
* the same number of elements as `src1.channels()`:
* \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
* A scalar and an array when src1 is constructed from Scalar or has
* the same number of elements as `src2.channels()`:
* \f[\texttt{dst} (I) = \texttt{src1} \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* In case of floating-point arrays, their machine-specific bit
* representations (usually IEEE754-compliant) are used for the operation.
* In case of multi-channel arrays, each channel is processed
* independently. In the second and third cases above, the scalar is first
* converted to the array type.
* @param src1 first input array or a scalar.
* @param src2 second input array or a scalar.
* @param dst output array that has the same size and type as the input
* arrays.
* @param mask optional operation mask, 8-bit single channel array, that
* specifies elements of the output array to be changed.
*/
export function bitwise_and(src1: Mat, src2: Mat, dst: Mat, mask?: Mat): void
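/*
 * Usage sketch (illustration only): the common masking idiom, where and-ing
 * an image with itself under a mask keeps only the pixels where mask != 0:
 *
 *   const masked = new cv.Mat();
 *   cv.bitwise_and(src, src, masked, mask);  // mask: 8-bit single-channel
 *   masked.delete();
 */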
/**
* @brief Inverts every bit of an array.
*
* The function cv::bitwise_not calculates per-element bit-wise inversion of the input
* array:
* \f[\texttt{dst} (I) = \neg \texttt{src} (I)\f]
* In case of a floating-point input array, its machine-specific bit
* representation (usually IEEE754-compliant) is used for the operation. In
* case of multi-channel arrays, each channel is processed independently.
* @param src input array.
* @param dst output array that has the same size and type as the input
* array.
* @param mask optional operation mask, 8-bit single channel array, that
* specifies elements of the output array to be changed.
*/
export function bitwise_not(src: Mat, dst: Mat, mask?: Mat): void
/**
* @brief Calculates the per-element bit-wise disjunction of two arrays or an
* array and a scalar.
*
* The function cv::bitwise_or calculates the per-element bit-wise logical disjunction for:
* Two arrays when src1 and src2 have the same size:
* \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* An array and a scalar when src2 is constructed from Scalar or has
* the same number of elements as `src1.channels()`:
* \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
* A scalar and an array when src1 is constructed from Scalar or has
* the same number of elements as `src2.channels()`:
* \f[\texttt{dst} (I) = \texttt{src1} \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* In case of floating-point arrays, their machine-specific bit
* representations (usually IEEE754-compliant) are used for the operation.
* In case of multi-channel arrays, each channel is processed
* independently. In the second and third cases above, the scalar is first
* converted to the array type.
* @param src1 first input array or a scalar.
* @param src2 second input array or a scalar.
* @param dst output array that has the same size and type as the input
* arrays.
* @param mask optional operation mask, 8-bit single channel array, that
* specifies elements of the output array to be changed.
*/
export function bitwise_or(src1: Mat, src2: Mat, dst: Mat, mask?: Mat): void
/**
* @brief Calculates the per-element bit-wise "exclusive or" operation on two
* arrays or an array and a scalar.
*
* The function cv::bitwise_xor calculates the per-element bit-wise logical "exclusive-or"
* operation for:
* Two arrays when src1 and src2 have the same size:
* \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* An array and a scalar when src2 is constructed from Scalar or has
* the same number of elements as `src1.channels()`:
* \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
* A scalar and an array when src1 is constructed from Scalar or has
* the same number of elements as `src2.channels()`:
* \f[\texttt{dst} (I) = \texttt{src1} \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* In case of floating-point arrays, their machine-specific bit
* representations (usually IEEE754-compliant) are used for the operation.
* In case of multi-channel arrays, each channel is processed
 * independently. In the second and third cases above, the scalar is first
* converted to the array type.
* @param src1 first input array or a scalar.
* @param src2 second input array or a scalar.
* @param dst output array that has the same size and type as the input
* arrays.
* @param mask optional operation mask, 8-bit single channel array, that
* specifies elements of the output array to be changed.
*/
export function bitwise_xor(src1: Mat, src2: Mat, dst: Mat, mask?: Mat): void
/**
* @brief Blurs an image using the normalized box filter.
*
* The function smooths an image using the kernel:
*
* \f[\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}\f]
*
* The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize,
* anchor, true, borderType)`.
*
* @param src input image; it can have any number of channels, which are processed independently, but
* the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src.
* @param ksize blurring kernel size.
* @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
* center.
* @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
* @sa boxFilter, bilateralFilter, GaussianBlur, medianBlur
*/
export function blur(src: Mat, dst: Mat, ksize: SizeLike, anchor?: PointLike, borderType?: int): void
/**
* @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
*
* The function calculates and returns the minimal up-right bounding rectangle for the specified point set or
* non-zero pixels of gray-scale image.
*
* @param array Input gray-scale image or 2D point set, stored in std::vector or Mat.
*/
export function boundingRect(array: Mat): RectLike
/**
* @brief Blurs an image using the box filter.
*
* The function smooths an image using the kernel:
*
* \f[\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}\f]
*
* where
*
* \f[\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}\f]
*
 * An unnormalized box filter is useful for computing various integral characteristics over each pixel
* neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
* algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
*
* @param src input image.
* @param dst output image of the same size and type as src.
* @param ddepth the output image depth (-1 to use src.depth()).
* @param ksize blurring kernel size.
* @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
* center.
* @param normalize flag, specifying whether the kernel is normalized by its area or not.
* @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
* @sa blur, bilateralFilter, GaussianBlur, medianBlur, integral
*/
export function boxFilter(src: Mat, dst: Mat, ddepth: int, ksize: SizeLike, anchor?: PointLike, normalize?: boolean, borderType?: int): void
/**
* @overload
*/
export function calcBackProject(images: MatVector, channels: IntVector|int[], hist: Mat, dst: Mat, ranges: FloatVector|float[], scale: double): void
/**
* @overload
*/
export function calcHist(images: MatVector, channels: IntVector|int[], mask: Mat, hist: Mat, histSize: IntVector|int[], ranges: FloatVector|float[], accumulate?: boolean): void
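/*
 * Usage sketch (illustration only): a 256-bin grayscale histogram. Plain
 * arrays satisfy the int[]/float[] unions in the signature above:
 *
 *   const srcVec = new cv.MatVector();
 *   srcVec.push_back(gray);
 *   const hist = new cv.Mat();
 *   const noMask = new cv.Mat();  // empty Mat = no mask
 *   cv.calcHist(srcVec, [0], noMask, hist, [256], [0, 255], false);
 *   srcVec.delete(); hist.delete(); noMask.delete();
 */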
/**
* @brief Computes a dense optical flow using the Gunnar Farneback's algorithm.
*
* @param prev first 8-bit single-channel input image.
* @param next second input image of the same size and the same type as prev.
* @param flow computed flow image that has the same size as prev and type CV_32FC2.
* @param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image;
 * pyr_scale=0.5 means a classical pyramid, where each subsequent layer is half the size of the previous
* one.
* @param levels number of pyramid layers including the initial image; levels=1 means that no extra
* layers are created and only the original images are used.
* @param winsize averaging window size; larger values increase the algorithm robustness to image
* noise and give more chances for fast motion detection, but yield more blurred motion field.
* @param iterations number of iterations the algorithm does at each pyramid level.
* @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
 * larger values mean that the image will be approximated with smoother surfaces, yielding a more
 * robust algorithm and a more blurred motion field; typically poly_n = 5 or 7.
* @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
* basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a
* good value would be poly_sigma=1.5.
* @param flags operation flags that can be a combination of the following:
* - **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation.
* - **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$
* filter instead of a box filter of the same size for optical flow estimation; usually, this
 * option gives a more accurate flow than with a box filter, at the cost of lower speed;
* normally, winsize for a Gaussian window should be set to a larger value to achieve the same
* level of robustness.
*
* The function finds an optical flow for each prev pixel using the @cite Farneback2003 algorithm so that
*
* \f[\texttt{prev} (y,x) \sim \texttt{next} ( y + \texttt{flow} (y,x)[1], x + \texttt{flow} (y,x)[0])\f]
*
* @note
*
* - An example using the optical flow algorithm described by Gunnar Farneback can be found at
* opencv_source_code/samples/cpp/fback.cpp
* - (Python) An example using the optical flow algorithm described by Gunnar Farneback can be
* found at opencv_source_code/samples/python/opt_flow.py
*/
export function calcOpticalFlowFarneback(prev: Mat, next: Mat, flow: Mat, pyr_scale: double, levels: int, winsize: int, iterations: int, poly_n: int, poly_sigma: double, flags: int): void
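/*
 * Usage sketch (illustration only), with a parameter set commonly seen in the
 * OpenCV samples. The flow Mat is CV_32FC2, so the displacement of pixel
 * (y, x) sits at index (y * cols + x) * 2:
 *
 *   const flow = new cv.Mat();
 *   cv.calcOpticalFlowFarneback(prevGray, nextGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
 *   const dx = flow.data32F[(y * flow.cols + x) * 2];      // horizontal shift
 *   const dy = flow.data32F[(y * flow.cols + x) * 2 + 1];  // vertical shift
 *   flow.delete();
 */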
/**
* @brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
* pyramids.
*
* @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
* @param nextImg second input image or pyramid of the same size and the same type as prevImg.
* @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
* single-precision floating-point numbers.
* @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
* containing the calculated new positions of input features in the second image; when
* OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
* @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
* the flow for the corresponding features has been found, otherwise, it is set to 0.
* @param err output vector of errors; each element of the vector is set to an error for the
* corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
* found then the error is not defined (use the status parameter to find such cases).
* @param winSize size of the search window at each pyramid level.
* @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
* level), if set to 1, two levels are used, and so on; if pyramids are passed to input then
* algorithm will use as many levels as pyramids have but no more than maxLevel.
* @param criteria parameter, specifying the termination criteria of the iterative search algorithm
* (after the specified maximum number of iterations criteria.maxCount or when the search window
 * moves by less than criteria.epsilon).
* @param flags operation flags:
* - **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is
* not set, then prevPts is copied to nextPts and is considered the initial estimate.
* - **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigen values as an error measure (see
* minEigThreshold description); if the flag is not set, then L1 distance between patches
* around the original and a moved point, divided by number of pixels in a window, is used as a
* error measure.
* @param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of
* optical flow equations (this matrix is called a spatial gradient matrix in @cite Bouguet00), divided
* by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding
 * feature is filtered out and its flow is not processed; this removes bad points and gives a
* performance boost.
*
* The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
* @cite Bouguet00 . The function is parallelized with the TBB library.
*
* @note
*
* - An example using the Lucas-Kanade optical flow algorithm can be found at
* opencv_source_code/samples/cpp/lkdemo.cpp
* - (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
* opencv_source_code/samples/python/lk_track.py
* - (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
* opencv_source_code/samples/python/lk_homography.py
*/
export function calcOpticalFlowPyrLK(prevImg: Mat, nextImg: Mat, prevPts: Mat, nextPts: Mat, status: Mat, err: Mat, winSize?: SizeLike, maxLevel?: int, criteria?: TermCriteriaLike, flags?: int, minEigThreshold?: double): void
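/*
 * Usage sketch (illustration only). The plain-object shapes for SizeLike and
 * TermCriteriaLike are assumptions based on the *Like value-object types;
 * stock OpenCV.js would use `new cv.Size(...)` / `new cv.TermCriteria(...)`:
 *
 *   const nextPts = new cv.Mat(), status = new cv.Mat(), err = new cv.Mat();
 *   cv.calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err,
 *     { width: 15, height: 15 },  // search window per pyramid level
 *     2,                          // up to 3 pyramid levels (0-based)
 *     { type: cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, maxCount: 10, epsilon: 0.03 });
 *   nextPts.delete(); status.delete(); err.delete();
 */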
/**
* @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration
* pattern.
*
* @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
* the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
* vector contains as many elements as the number of pattern views. If the same calibration pattern
 * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
* possible to use partially occluded patterns or even different patterns in different views. Then,
* the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
* XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
* In the old interface all the vectors of object points from different views are concatenated
* together.
* @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
* pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
* objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
* respectively. In the old interface all the vectors of object points from different views are
* concatenated together.
* @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
* @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
* \f$\cameramatrix{A}\f$ . If @ref CALIB_USE_INTRINSIC_GUESS
* and/or @ref CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Input/output vector of distortion coefficients
* \f$\distcoeffs\f$.
* @param rvecs Output vector of rotation vectors (@ref Rodrigues ) estimated for each pattern view
* (e.g. std::vector<cv::Mat>>). That is, each i-th rotation vector together with the corresponding
* i-th translation vector (see the next output parameter description) brings the calibration pattern
* from the object coordinate space (in which object points are specified) to the camera coordinate
* space. In more technical terms, the tuple of the i-th rotation and translation vector performs
* a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
* tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
* space.
* @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
 * description above.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
* parameters. Order of deviations values:
* \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)\f$ . If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
* parameters. Order of deviations values: \f$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\f$ where M is
* the number of pattern views. \f$R_i, T_i\f$ are concatenated 1x3 vectors.
* @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
* @param flags Different flags that may be zero or a combination of the following values:
* - @ref CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
* fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
* center ( imageSize is used), and focal distances are computed in a least-squares fashion.
* Note, that if intrinsic parameters are known, there is no need to use this function just to
* estimate extrinsic parameters. Use solvePnP instead.
* - @ref CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
* optimization. It stays at the center or at a different location specified when
* @ref CALIB_USE_INTRINSIC_GUESS is set too.
* - @ref CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
* ratio fx/fy stays the same as in the input cameraMatrix . When
* @ref CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
* ignored, only their ratio is computed and used further.
* - @ref CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
* to zeros and stay zero.
* - @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 The corresponding radial distortion
* coefficient is not changed during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is
* set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
* - @ref CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
* backward compatibility, this extra flag should be explicitly specified to make the
* calibration function use the rational model and return 8 coefficients. If the flag is not
* set, the function computes and returns only 5 distortion coefficients.
* - @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
* backward compatibility, this extra flag should be explicitly specified to make the
* calibration function use the thin prism model and return 12 coefficients. If the flag is not
* set, the function computes and returns only 5 distortion coefficients.
* - @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
* the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
* supplied distCoeffs matrix is used. Otherwise, it is set to 0.
* - @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
* backward compatibility, this extra flag should be explicitly specified to make the
* calibration function use the tilted