update gitignore

This commit is contained in:
2026-01-20 20:00:16 +08:00
parent bc9f2824ed
commit 4882dc1a67
358 changed files with 1 additions and 161239 deletions

View File

@@ -1,199 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_ARUCO_BOARD_HPP
#define OPENCV_OBJDETECT_ARUCO_BOARD_HPP
#include <opencv2/core.hpp>
namespace cv {
namespace aruco {
//! @addtogroup objdetect_aruco
//! @{
class Dictionary;
/** @brief Board of ArUco markers
*
* A board is a set of markers in the 3D space with a common coordinate system.
* The common form of a board of markers is a planar (2D) board, however any 3D layout can be used.
* A Board object is composed by:
* - The object points of the marker corners, i.e. their coordinates with respect to the board system.
* - The dictionary which indicates the type of markers of the board.
* - The identifiers of all the markers in the board.
*/
class CV_EXPORTS_W_SIMPLE Board {
public:
/** @brief Common Board constructor
*
* @param objPoints array of object points of all the marker corners in the board
* @param dictionary the dictionary of markers employed for this board
* @param ids vector of the identifiers of the markers in the board
*/
CV_WRAP Board(InputArrayOfArrays objPoints, const Dictionary& dictionary, InputArray ids);
/** @brief return the Dictionary of markers employed for this board
*/
CV_WRAP const Dictionary& getDictionary() const;
/** @brief return array of object points of all the marker corners in the board.
*
* Each marker includes its 4 corners in this order:
* - objPoints[i][0] - left-top point of i-th marker
* - objPoints[i][1] - right-top point of i-th marker
* - objPoints[i][2] - right-bottom point of i-th marker
* - objPoints[i][3] - left-bottom point of i-th marker
*
* Markers are placed in a certain order - row by row, left to right in every row. For M markers, the size is Mx4.
*/
CV_WRAP const std::vector<std::vector<Point3f> >& getObjPoints() const;
/** @brief vector of the identifiers of the markers in the board (should be the same size as objPoints)
* @return vector of the identifiers of the markers
*/
CV_WRAP const std::vector<int>& getIds() const;
/** @brief get coordinate of the bottom right corner of the board, is set when calling the function create()
*/
CV_WRAP const Point3f& getRightBottomCorner() const;
/** @brief Given a board configuration and a set of detected markers, returns the corresponding
* image points and object points, can be used in solvePnP()
*
* @param detectedCorners List of detected marker corners of the board.
* For cv::Board and cv::GridBoard the method expects std::vector<std::vector<Point2f>> or std::vector<Mat> with Aruco marker corners.
* For cv::CharucoBoard the method expects std::vector<Point2f> or Mat with ChAruco corners (chess board corners matched with Aruco markers).
*
* @param detectedIds List of identifiers for each marker or charuco corner.
* For any Board class the method expects std::vector<int> or Mat.
*
* @param objPoints Vector of marker points in the board coordinate space.
* For any Board class the method expects std::vector<cv::Point3f> objectPoints or cv::Mat
*
* @param imgPoints Vector of marker points in the image coordinate space.
* For any Board class the method expects std::vector<cv::Point2f> imagePoints or cv::Mat
*
* @sa solvePnP
*/
CV_WRAP void matchImagePoints(InputArrayOfArrays detectedCorners, InputArray detectedIds,
OutputArray objPoints, OutputArray imgPoints) const;
/** @brief Draw a planar board
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of the board, ready to be printed.
*/
CV_WRAP void generateImage(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1) const;
CV_DEPRECATED_EXTERNAL // avoid using in C++ code, will be moved to "protected" (need to fix bindings first)
Board();
struct Impl;
protected:
/// construct from an already-built implementation (shared by derived boards)
Board(const Ptr<Impl>& impl);
Ptr<Impl> impl;
};
/** @brief Planar board with grid arrangement of markers
*
* More common type of board. All markers are placed in the same plane in a grid arrangement.
* The board image can be drawn using generateImage() method.
*/
class CV_EXPORTS_W_SIMPLE GridBoard : public Board {
public:
/**
* @brief GridBoard constructor
*
* @param size number of markers in x and y directions
* @param markerLength marker side length (normally in meters)
* @param markerSeparation separation between two markers (same unit as markerLength)
* @param dictionary dictionary of markers indicating the type of markers
* @param ids set of marker ids in dictionary to use on board.
*/
CV_WRAP GridBoard(const Size& size, float markerLength, float markerSeparation,
const Dictionary &dictionary, InputArray ids = noArray());
/// @brief number of markers in x and y directions
CV_WRAP Size getGridSize() const;
/// @brief marker side length (same unit as passed to the constructor)
CV_WRAP float getMarkerLength() const;
/// @brief separation between two markers (same unit as markerLength)
CV_WRAP float getMarkerSeparation() const;
CV_DEPRECATED_EXTERNAL // avoid using in C++ code, will be moved to "protected" (need to fix bindings first)
GridBoard();
};
/**
* @brief ChArUco board is a planar chessboard where the markers are placed inside the white squares of a chessboard.
*
* The benefits of ChArUco boards are that they provide both ArUco markers versatility and chessboard corner precision,
* which is important for calibration and pose estimation. The board image can be drawn using generateImage() method.
*/
class CV_EXPORTS_W_SIMPLE CharucoBoard : public Board {
public:
/** @brief CharucoBoard constructor
*
* @param size number of chessboard squares in x and y directions
* @param squareLength chessboard square side length (normally in meters)
* @param markerLength marker side length (same unit as squareLength)
* @param dictionary dictionary of markers indicating the type of markers
* @param ids array of id used markers
* The first markers in the dictionary are used to fill the white chessboard squares.
*/
CV_WRAP CharucoBoard(const Size& size, float squareLength, float markerLength,
const Dictionary &dictionary, InputArray ids = noArray());
/** @brief set legacy chessboard pattern.
*
* Legacy setting creates chessboard patterns starting with a white box in the upper left corner
* if there is an even row count of chessboard boxes, otherwise it starts with a black box.
* This setting ensures compatibility to patterns created with OpenCV versions prior OpenCV 4.6.0.
* See https://github.com/opencv/opencv/issues/23152.
*
* Default value: false.
*/
CV_WRAP void setLegacyPattern(bool legacyPattern);
CV_WRAP bool getLegacyPattern() const;
/// @brief number of chessboard squares in x and y directions
CV_WRAP Size getChessboardSize() const;
/// @brief chessboard square side length (same unit as passed to the constructor)
CV_WRAP float getSquareLength() const;
/// @brief marker side length (same unit as squareLength)
CV_WRAP float getMarkerLength() const;
/** @brief get CharucoBoard::chessboardCorners
*/
CV_WRAP std::vector<Point3f> getChessboardCorners() const;
/** @brief get CharucoBoard::nearestMarkerIdx, for each charuco corner, nearest marker index in ids array
*/
CV_PROP std::vector<std::vector<int> > getNearestMarkerIdx() const;
/** @brief get CharucoBoard::nearestMarkerCorners, for each charuco corner, nearest marker corner id of each marker
*/
CV_PROP std::vector<std::vector<int> > getNearestMarkerCorners() const;
/** @brief check whether the ChArUco markers are collinear
*
* @param charucoIds list of identifiers for each corner in charucoCorners per frame.
* @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not.
* solvePnP, calibration functions will fail if the corners are collinear (true).
*
* The number of ids in charucoIDs should be <= the number of chessboard corners in the board.
* This function checks whether the charuco corners are on a straight line (returns true, if so), or not (false).
* Axis-parallel, as well as diagonal and other straight lines are detected. Degenerate cases:
* for a number of charucoIDs <= 2, the function returns true.
*/
CV_WRAP bool checkCharucoCornersCollinear(InputArray charucoIds) const;
CV_DEPRECATED_EXTERNAL // avoid using in C++ code, will be moved to "protected" (need to fix bindings first)
CharucoBoard();
};
//! @}
}
}
#endif

View File

@@ -1,489 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_ARUCO_DETECTOR_HPP
#define OPENCV_OBJDETECT_ARUCO_DETECTOR_HPP
#include <opencv2/objdetect/aruco_dictionary.hpp>
#include <opencv2/objdetect/aruco_board.hpp>
namespace cv {
namespace aruco {
//! @addtogroup objdetect_aruco
//! @{
/** @brief Corner refinement methods used by DetectorParameters::cornerRefinementMethod */
enum CornerRefineMethod{
CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach
CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy
CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting
CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros
};
/** @brief struct DetectorParameters is used by ArucoDetector
*/
struct CV_EXPORTS_W_SIMPLE DetectorParameters {
/// @brief Default constructor; initializes all tuning parameters to their documented default values.
CV_WRAP DetectorParameters() {
adaptiveThreshWinSizeMin = 3;
adaptiveThreshWinSizeMax = 23;
adaptiveThreshWinSizeStep = 10;
adaptiveThreshConstant = 7;
minMarkerPerimeterRate = 0.03;
maxMarkerPerimeterRate = 4.;
polygonalApproxAccuracyRate = 0.03;
minCornerDistanceRate = 0.05;
minDistanceToBorder = 3;
minMarkerDistanceRate = 0.125;
cornerRefinementMethod = (int)CORNER_REFINE_NONE;
cornerRefinementWinSize = 5;
relativeCornerRefinmentWinSize = 0.3f;
cornerRefinementMaxIterations = 30;
cornerRefinementMinAccuracy = 0.1;
markerBorderBits = 1;
perspectiveRemovePixelPerCell = 4;
perspectiveRemoveIgnoredMarginPerCell = 0.13;
maxErroneousBitsInBorderRate = 0.35;
minOtsuStdDev = 5.0;
errorCorrectionRate = 0.6;
aprilTagQuadDecimate = 0.0;
aprilTagQuadSigma = 0.0;
aprilTagMinClusterPixels = 5;
aprilTagMaxNmaxima = 10;
aprilTagCriticalRad = (float)(10* CV_PI /180);
aprilTagMaxLineFitMse = 10.0;
aprilTagMinWhiteBlackDiff = 5;
aprilTagDeglitch = 0;
detectInvertedMarker = false;
useAruco3Detection = false;
minSideLengthCanonicalImg = 32;
minMarkerLengthRatioOriginalImg = 0.0;
}
/** @brief Read a new set of DetectorParameters from FileNode (use FileStorage.root()).
*/
CV_WRAP bool readDetectorParameters(const FileNode& fn);
/** @brief Write a set of DetectorParameters to FileStorage
*/
CV_WRAP bool writeDetectorParameters(FileStorage& fs, const String& name = String());
/// minimum window size for adaptive thresholding before finding contours (default 3).
CV_PROP_RW int adaptiveThreshWinSizeMin;
/// maximum window size for adaptive thresholding before finding contours (default 23).
CV_PROP_RW int adaptiveThreshWinSizeMax;
/// increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax during the thresholding (default 10).
CV_PROP_RW int adaptiveThreshWinSizeStep;
/// constant for adaptive thresholding before finding contours (default 7)
CV_PROP_RW double adaptiveThreshConstant;
/** @brief determine minimum perimeter for marker contour to be detected.
*
* This is defined as a rate with respect to the maximum dimension of the input image (default 0.03).
*/
CV_PROP_RW double minMarkerPerimeterRate;
/** @brief determine maximum perimeter for marker contour to be detected.
*
* This is defined as a rate with respect to the maximum dimension of the input image (default 4.0).
*/
CV_PROP_RW double maxMarkerPerimeterRate;
/// minimum accuracy during the polygonal approximation process to determine which contours are squares. (default 0.03)
CV_PROP_RW double polygonalApproxAccuracyRate;
/// minimum distance between corners for detected markers relative to its perimeter (default 0.05)
CV_PROP_RW double minCornerDistanceRate;
/// minimum distance of any corner to the image border for detected markers (in pixels) (default 3)
CV_PROP_RW int minDistanceToBorder;
/** @brief minimum average distance between the corners of the two markers to be grouped (default 0.125).
*
* The rate is relative to the smaller perimeter of the two markers.
* Two markers are grouped if average distance between the corners of the two markers is less than
* min(MarkerPerimeter1, MarkerPerimeter2)*minMarkerDistanceRate.
*
* default value is 0.125 because 0.125*MarkerPerimeter = (MarkerPerimeter / 4) * 0.5 = half the side of the marker.
*
* @note default value was changed from 0.05 after 4.8.1 release, because the filtering algorithm has been changed.
* Now a few candidates from the same group can be added to the list of candidates if they are far from each other.
* @sa minGroupDistance.
*/
CV_PROP_RW double minMarkerDistanceRate;
/** @brief minimum average distance between the corners of the two markers in group to add them to the list of candidates
*
* The average distance between the corners of the two markers is calculated relative to its module size (default 0.21).
*/
CV_PROP_RW float minGroupDistance = 0.21f;
/** @brief corner refinement method, one of CornerRefineMethod (default CORNER_REFINE_NONE) */
CV_PROP_RW int cornerRefinementMethod;
/** @brief maximum window size for the corner refinement process (in pixels) (default 5).
*
* The window size may decrease if the ArUco marker is too small, check relativeCornerRefinmentWinSize.
* The final window size is calculated as:
* min(cornerRefinementWinSize, averageArucoModuleSize*relativeCornerRefinmentWinSize),
* where averageArucoModuleSize is average module size of ArUco marker in pixels.
* (ArUco marker is composed of black and white modules)
*/
CV_PROP_RW int cornerRefinementWinSize;
/** @brief Dynamic window size for corner refinement relative to Aruco module size (default 0.3).
*
* The final window size is calculated as:
* min(cornerRefinementWinSize, averageArucoModuleSize*relativeCornerRefinmentWinSize),
* where averageArucoModuleSize is average module size of ArUco marker in pixels.
* (ArUco marker is composed of black and white modules)
* In the case of markers located far from each other, it may be useful to increase the value of the parameter to 0.4-0.5.
* In the case of markers located close to each other, it may be useful to decrease the parameter value to 0.1-0.2.
*/
CV_PROP_RW float relativeCornerRefinmentWinSize;
/// maximum number of iterations for stop criteria of the corner refinement process (default 30).
CV_PROP_RW int cornerRefinementMaxIterations;
/// minimum error for the stop criteria of the corner refinement process (default: 0.1)
CV_PROP_RW double cornerRefinementMinAccuracy;
/// number of bits of the marker border, i.e. marker border width (default 1).
CV_PROP_RW int markerBorderBits;
/// number of bits (per dimension) for each cell of the marker when removing the perspective (default 4).
CV_PROP_RW int perspectiveRemovePixelPerCell;
/** @brief width of the margin of pixels on each cell not considered for the determination of the cell bit.
*
* Represents the rate with respect to the total size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13)
*/
CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell;
/** @brief maximum number of accepted erroneous bits in the border (i.e. number of allowed white bits in the border).
*
* Represented as a rate with respect to the total number of bits per marker (default 0.35).
*/
CV_PROP_RW double maxErroneousBitsInBorderRate;
/** @brief minimum standard deviation in pixels values during the decoding step to apply Otsu
* thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher than 128 or not) (default 5.0)
*/
CV_PROP_RW double minOtsuStdDev;
/// error correction rate with respect to the maximum error correction capability for each dictionary (default 0.6).
CV_PROP_RW double errorCorrectionRate;
/** @brief April :: User-configurable parameters.
*
* Detection of quads can be done on a lower-resolution image, improving speed at a cost of
* pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
* done at full resolution (default 0.0, i.e. no decimation).
*/
CV_PROP_RW float aprilTagQuadDecimate;
/// what Gaussian blur should be applied to the segmented image (used for quad detection?)
CV_PROP_RW float aprilTagQuadSigma;
// April :: Internal variables
/// reject quads containing too few pixels (default 5).
CV_PROP_RW int aprilTagMinClusterPixels;
/// how many corner candidates to consider when segmenting a group of pixels into a quad (default 10).
CV_PROP_RW int aprilTagMaxNmaxima;
/** @brief reject quads where pairs of edges have angles that are close to straight or close to 180 degrees.
*
* Zero means that no quads are rejected. (In radians) (default 10*PI/180)
*/
CV_PROP_RW float aprilTagCriticalRad;
/// when fitting lines to the contours, what is the maximum mean squared error
CV_PROP_RW float aprilTagMaxLineFitMse;
/** @brief add an extra check that the white model must be (overall) brighter than the black model.
*
* When we build our model of black & white pixels, we add an extra check that the white model must be (overall)
* brighter than the black model. How much brighter? (in pixel values, [0,255]), (default 5)
*/
CV_PROP_RW int aprilTagMinWhiteBlackDiff;
/// should the thresholded image be deglitched? Only useful for very noisy images (default 0).
CV_PROP_RW int aprilTagDeglitch;
/** @brief to check if there is a white marker.
*
* In order to generate a "white" marker just invert a normal marker by using a tilde, ~markerImage. (default false)
*/
CV_PROP_RW bool detectInvertedMarker;
/** @brief enable the new and faster Aruco detection strategy.
*
* Proposed in the paper:
* Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018)
* https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers
*/
CV_PROP_RW bool useAruco3Detection;
/// minimum side length of a marker in the canonical image. Latter is the binarized image in which contours are searched.
CV_PROP_RW int minSideLengthCanonicalImg;
/// range [0,1], eq (2) from paper. The parameter tau_i has a direct influence on the processing speed.
CV_PROP_RW float minMarkerLengthRatioOriginalImg;
};
/** @brief struct RefineParameters is used by ArucoDetector
*/
struct CV_EXPORTS_W_SIMPLE RefineParameters {
/// @brief Constructor with the documented default values for each refinement parameter.
CV_WRAP RefineParameters(float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true);
/** @brief Read a new set of RefineParameters from FileNode (use FileStorage.root()).
*/
CV_WRAP bool readRefineParameters(const FileNode& fn);
/** @brief Write a set of RefineParameters to FileStorage
*/
CV_WRAP bool writeRefineParameters(FileStorage& fs, const String& name = String());
/** @brief minimum distance between the corners of the rejected candidate and the reprojected marker
* in order to consider it as a correspondence (default 10.0).
*/
CV_PROP_RW float minRepDistance;
/** @brief rate of allowed erroneous bits with respect to the error correction capability of the used dictionary.
*
* -1 ignores the error correction step (default 3.0).
*/
CV_PROP_RW float errorCorrectionRate;
/** @brief consider the four possible corner orders in the rejectedCorners array.
*
* If it is set to false, only the provided corner order is considered (default true).
*/
CV_PROP_RW bool checkAllOrders;
};
/** @brief The main functionality of ArucoDetector class is detection of markers in an image with detectMarkers() method.
*
* After detecting some markers in the image, you can try to find undetected markers from this dictionary with
* refineDetectedMarkers() method.
*
* @see DetectorParameters, RefineParameters
*/
class CV_EXPORTS_W ArucoDetector : public Algorithm
{
public:
/** @brief Basic ArucoDetector constructor
*
* @param dictionary indicates the type of markers that will be searched
* @param detectorParams marker detection parameters
* @param refineParams marker refine detection parameters
*/
CV_WRAP ArucoDetector(const Dictionary &dictionary = getPredefinedDictionary(cv::aruco::DICT_4X4_50),
const DetectorParameters &detectorParams = DetectorParameters(),
const RefineParameters& refineParams = RefineParameters());
/** @brief ArucoDetector constructor for multiple dictionaries
*
* @param dictionaries indicates the type of markers that will be searched. Empty dictionaries will throw an error.
* @param detectorParams marker detection parameters
* @param refineParams marker refine detection parameters
*/
CV_WRAP ArucoDetector(const std::vector<Dictionary> &dictionaries,
const DetectorParameters &detectorParams = DetectorParameters(),
const RefineParameters& refineParams = RefineParameters());
/** @brief Basic marker detection
*
* @param image input image
* @param corners vector of detected marker corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array is Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
* The identifiers have the same order as the markers in the imgPoints array.
* @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
* correct codification. Useful for debugging purposes.
*
* Performs marker detection in the input image. Only markers included in the first specified dictionary
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
* @note The function does not correct lens distortion nor take it into account. It's recommended to undistort
* input image with corresponding camera model, if camera parameters are known
* @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard
*/
CV_WRAP void detectMarkers(InputArray image, OutputArrayOfArrays corners, OutputArray ids,
OutputArrayOfArrays rejectedImgPoints = noArray()) const;
/** @brief Marker detection with confidence computation
*
* @param image input image
* @param corners vector of detected marker corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array is Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
* The identifiers have the same order as the markers in the imgPoints array.
* @param markersConfidence contains the normalized confidence [0;1] of the markers' detection,
* defined as 1 minus the normalized uncertainty (percentage of incorrect pixel detections),
* with 1 describing a pixel perfect detection. The confidence values are of type float
* (e.g. std::vector<float>)
* @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
* correct codification. Useful for debugging purposes.
*
* Performs marker detection in the input image. Only markers included in the first specified dictionary
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
* @note The function does not correct lens distortion nor take it into account. It's recommended to undistort
* input image with corresponding camera model, if camera parameters are known
* @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard
*/
CV_WRAP void detectMarkersWithConfidence(InputArray image, OutputArrayOfArrays corners, OutputArray ids, OutputArray markersConfidence,
OutputArrayOfArrays rejectedImgPoints = noArray()) const;
/** @brief Refine not detected markers based on the already detected and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs optional vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param recoveredIdxs Optional array to return the indexes of the recovered candidates in the
* original rejectedCorners array.
*
* This function tries to find markers that were not detected in the basic detectMarkers function.
* First, based on the current detected marker and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondence between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using projectPoint function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
* @note This function assumes that the board only contains markers from one dictionary, so only the
* first configured dictionary is used. It has to match the dictionary of the board to work properly.
*/
CV_WRAP void refineDetectedMarkers(InputArray image, const Board &board,
InputOutputArrayOfArrays detectedCorners,
InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
OutputArray recoveredIdxs = noArray()) const;
/** @brief Marker detection over all configured dictionaries
*
* @param image input image
* @param corners vector of detected marker corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array is Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
* The identifiers have the same order as the markers in the imgPoints array.
* @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
* correct codification. Useful for debugging purposes.
* @param dictIndices vector of dictionary indices for each detected marker. Use getDictionaries() to get the
* list of corresponding dictionaries.
*
* Performs marker detection in the input image. Only markers included in the specific dictionaries
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
* @note The function does not correct lens distortion nor take it into account. It's recommended to undistort
* input image with corresponding camera model, if camera parameters are known
* @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard
*/
CV_WRAP void detectMarkersMultiDict(InputArray image, OutputArrayOfArrays corners, OutputArray ids,
OutputArrayOfArrays rejectedImgPoints = noArray(), OutputArray dictIndices = noArray()) const;
/** @brief Returns first dictionary from internal list used for marker detection.
*
* @return The first dictionary from the configured ArucoDetector.
*/
CV_WRAP const Dictionary& getDictionary() const;
/** @brief Sets and replaces the first dictionary in internal list to be used for marker detection.
*
* @param dictionary The new dictionary that will replace the first dictionary in the internal list.
*/
CV_WRAP void setDictionary(const Dictionary& dictionary);
/** @brief Returns all dictionaries currently used for marker detection as a vector.
*
* @return A std::vector<Dictionary> containing all dictionaries used by the ArucoDetector.
*/
CV_WRAP std::vector<Dictionary> getDictionaries() const;
/** @brief Sets the entire collection of dictionaries to be used for marker detection, replacing any existing dictionaries.
*
* @param dictionaries A std::vector<Dictionary> containing the new set of dictionaries to be used.
*
* Configures the ArucoDetector to use the provided vector of dictionaries for marker detection.
* This method replaces any dictionaries that were previously set.
* @note Setting an empty vector of dictionaries will throw an error.
*/
CV_WRAP void setDictionaries(const std::vector<Dictionary>& dictionaries);
/// @brief Returns the current marker detection parameters.
CV_WRAP const DetectorParameters& getDetectorParameters() const;
/// @brief Replaces the marker detection parameters.
CV_WRAP void setDetectorParameters(const DetectorParameters& detectorParameters);
/// @brief Returns the current marker refinement parameters.
CV_WRAP const RefineParameters& getRefineParameters() const;
/// @brief Replaces the marker refinement parameters.
CV_WRAP void setRefineParameters(const RefineParameters& refineParameters);
/** @brief Stores algorithm parameters in a file storage
*/
virtual void write(FileStorage& fs) const override;
/** @brief simplified API for language bindings
*/
CV_WRAP inline void write(FileStorage& fs, const String& name) { Algorithm::write(fs, name); }
/** @brief Reads algorithm parameters from a file storage
*/
CV_WRAP virtual void read(const FileNode& fn) override;
protected:
struct ArucoDetectorImpl;
Ptr<ArucoDetectorImpl> arucoDetectorImpl;
};
/** @brief Draw detected markers in image
*
* @param image input/output image. It must have 1 or 3 channels. The number of channels is not altered.
* @param corners positions of marker corners on input image.
* (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the dimensions of
* this array should be Nx4. The order of the corners should be clockwise.
* @param ids vector of identifiers for markers in markersCorners .
* Optional, if not provided, ids are not painted.
* @param borderColor color of marker borders. Rest of colors (text color and first corner color)
* are calculated based on this one to improve visualization.
*
* Given an array of detected marker corners and its corresponding ids, this function draws
* the markers in the image. The marker borders are painted and the marker identifiers if provided.
* Useful for debugging purposes.
*/
CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,
InputArray ids = noArray(), Scalar borderColor = Scalar(0, 255, 0));
/** @brief Generate a canonical marker image
*
* @param dictionary dictionary of markers indicating the type of markers
* @param id identifier of the marker that will be returned. It has to be a valid id in the specified dictionary.
* @param sidePixels size of the image in pixels
* @param img output image with the marker
* @param borderBits width of the marker border.
*
* This function returns a marker image in its canonical form (i.e. ready to be printed)
*/
CV_EXPORTS_W void generateImageMarker(const Dictionary &dictionary, int id, int sidePixels, OutputArray img,
int borderBits = 1);
//! @}
}
}
#endif

View File

@@ -1,154 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_DICTIONARY_HPP
#define OPENCV_OBJDETECT_DICTIONARY_HPP
#include <opencv2/core.hpp>
namespace cv {
namespace aruco {
//! @addtogroup objdetect_aruco
//! @{
/** @brief Dictionary is a set of unique ArUco markers of the same size
 *
 * `bytesList` is stored as a 2-dimensional Mat of type CV_8UC4 and contains the marker codewords, where:
 * - bytesList.rows is the dictionary size
 * - each marker is encoded using `nbytes = ceil(markerSize*markerSize/8.)` bytes
 * - each row contains all 4 rotations of the marker, so its length is `4*nbytes`
 * - the byte order in the bytesList[i] row:
 *   `//bytes without rotation/bytes with rotation 1/bytes with rotation 2/bytes with rotation 3//`
 * So `bytesList.ptr(i)[k*nbytes + j]` is the j-th byte of the i-th marker, in its k-th rotation.
 * @note Python bindings generate a matrix with shape of bytesList `dictionary_size x nbytes x 4`,
 * but it should be indexed like the C++ version. Python example for the j-th byte of the i-th marker,
 * in its k-th rotation:
 * `aruco_dict.bytesList[id].ravel()[k*nbytes + j]`
 */
class CV_EXPORTS_W_SIMPLE Dictionary {
public:
    CV_PROP_RW Mat bytesList;         ///< marker code information. See class description for more details
    CV_PROP_RW int markerSize;        ///< number of bits per dimension
    CV_PROP_RW int maxCorrectionBits; ///< maximum number of bits that can be corrected

    CV_WRAP Dictionary();

    /** @brief Basic ArUco dictionary constructor
     *
     * @param bytesList bits for all ArUco markers in the dictionary, see memory layout in the class description
     * @param _markerSize ArUco marker size (bits per dimension)
     * @param maxcorr maximum number of bits that can be corrected
     */
    CV_WRAP Dictionary(const Mat &bytesList, int _markerSize, int maxcorr = 0);

    /** @brief Read a new dictionary from a FileNode.
     *
     * Dictionary example in YAML format:\n
     * nmarkers: 35\n
     * markersize: 6\n
     * maxCorrectionBits: 5\n
     * marker_0: "101011111011111001001001101100000000"\n
     * ...\n
     * marker_34: "011111010000111011111110110101100101"
     */
    CV_WRAP bool readDictionary(const cv::FileNode& fn);

    /** @brief Write a dictionary to FileStorage, format is the same as in readDictionary().
     */
    CV_WRAP void writeDictionary(FileStorage& fs, const String& name = String());

    /** @brief Given a matrix of bits, returns whether the marker is identified or not.
     *
     * Returns by reference the marker id in the dictionary (if any) and its rotation.
     */
    CV_WRAP bool identify(const Mat &onlyBits, CV_OUT int &idx, CV_OUT int &rotation, double maxCorrectionRate) const;

    /** @brief Returns the Hamming distance of the input bits to the specific id.
     *
     * If the `allRotations` flag is set, the four possible marker rotations are considered
     */
    CV_WRAP int getDistanceToId(InputArray bits, int id, bool allRotations = true) const;

    /** @brief Generate a canonical marker image
     */
    CV_WRAP void generateImageMarker(int id, int sidePixels, OutputArray _img, int borderBits = 1) const;

    /** @brief Transform a matrix of bits to a list of bytes with the 4 marker rotations
     */
    CV_WRAP static Mat getByteListFromBits(const Mat &bits);

    /** @brief Transform a list of bytes to a matrix of bits
     */
    CV_WRAP static Mat getBitsFromByteList(const Mat &byteList, int markerSize, int rotationId = 0);
};
/** @brief Predefined markers dictionaries/sets
 *
 * Each dictionary indicates the number of bits and the number of markers contained
 * - DICT_ARUCO_ORIGINAL: standard ArUco Library Markers. 1024 markers, 5x5 bits, 0 minimum distance
 *
 * NOTE(review): this description (5x5 bits, 0 minimum distance) disagrees with the
 * DICT_ARUCO_ORIGINAL enumerator comment below (6x6 bits, minimum distance 3) — confirm
 * against the actual predefined dictionary data and reconcile the two comments.
 */
enum PredefinedDictionaryType {
    DICT_4X4_50 = 0,        ///< 4x4 bits, minimum hamming distance between any two codes = 4, 50 codes
    DICT_4X4_100,           ///< 4x4 bits, minimum hamming distance between any two codes = 3, 100 codes
    DICT_4X4_250,           ///< 4x4 bits, minimum hamming distance between any two codes = 3, 250 codes
    DICT_4X4_1000,          ///< 4x4 bits, minimum hamming distance between any two codes = 2, 1000 codes
    DICT_5X5_50,            ///< 5x5 bits, minimum hamming distance between any two codes = 8, 50 codes
    DICT_5X5_100,           ///< 5x5 bits, minimum hamming distance between any two codes = 7, 100 codes
    DICT_5X5_250,           ///< 5x5 bits, minimum hamming distance between any two codes = 6, 250 codes
    DICT_5X5_1000,          ///< 5x5 bits, minimum hamming distance between any two codes = 5, 1000 codes
    DICT_6X6_50,            ///< 6x6 bits, minimum hamming distance between any two codes = 13, 50 codes
    DICT_6X6_100,           ///< 6x6 bits, minimum hamming distance between any two codes = 12, 100 codes
    DICT_6X6_250,           ///< 6x6 bits, minimum hamming distance between any two codes = 11, 250 codes
    DICT_6X6_1000,          ///< 6x6 bits, minimum hamming distance between any two codes = 9, 1000 codes
    DICT_7X7_50,            ///< 7x7 bits, minimum hamming distance between any two codes = 19, 50 codes
    DICT_7X7_100,           ///< 7x7 bits, minimum hamming distance between any two codes = 18, 100 codes
    DICT_7X7_250,           ///< 7x7 bits, minimum hamming distance between any two codes = 17, 250 codes
    DICT_7X7_1000,          ///< 7x7 bits, minimum hamming distance between any two codes = 14, 1000 codes
    DICT_ARUCO_ORIGINAL,    ///< 6x6 bits, minimum hamming distance between any two codes = 3, 1024 codes
    DICT_APRILTAG_16h5,     ///< 4x4 bits, minimum hamming distance between any two codes = 5, 30 codes
    DICT_APRILTAG_25h9,     ///< 5x5 bits, minimum hamming distance between any two codes = 9, 35 codes
    DICT_APRILTAG_36h10,    ///< 6x6 bits, minimum hamming distance between any two codes = 10, 2320 codes
    DICT_APRILTAG_36h11,    ///< 6x6 bits, minimum hamming distance between any two codes = 11, 587 codes
    DICT_ARUCO_MIP_36h12    ///< 6x6 bits, minimum hamming distance between any two codes = 12, 250 codes
};
/** @brief Returns one of the predefined dictionaries defined in PredefinedDictionaryType
 */
CV_EXPORTS Dictionary getPredefinedDictionary(PredefinedDictionaryType name);

/** @brief Returns one of the predefined dictionaries referenced by DICT_*.
 */
CV_EXPORTS_W Dictionary getPredefinedDictionary(int dict);

/** @brief Extend base dictionary by new nMarkers
 *
 * @param nMarkers number of markers in the dictionary
 * @param markerSize number of bits per dimension of each marker
 * @param baseDictionary include the markers of this dictionary at the beginning (optional)
 * @param randomSeed a user supplied seed for theRNG()
 *
 * This function creates a new dictionary composed of nMarkers markers, each marker composed
 * of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
 * included and the rest are generated based on them. If the size of baseDictionary is higher
 * than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.
 */
CV_EXPORTS_W Dictionary extendDictionary(int nMarkers, int markerSize, const Dictionary &baseDictionary = Dictionary(),
                                         int randomSeed=0);
//! @}
}
}
#endif

View File

@@ -1,111 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (c) 2020-2021 darkliang wangberlinT Certseeds
#ifndef OPENCV_OBJDETECT_BARCODE_HPP
#define OPENCV_OBJDETECT_BARCODE_HPP
#include <opencv2/core.hpp>
#include <opencv2/objdetect/graphical_code_detector.hpp>
namespace cv {
namespace barcode {
//! @addtogroup objdetect_barcode
//! @{
class CV_EXPORTS_W_SIMPLE BarcodeDetector : public cv::GraphicalCodeDetector
{
public:
    /** @brief Initialize the BarcodeDetector.
     */
    CV_WRAP BarcodeDetector();

    /** @brief Initialize the BarcodeDetector.
     *
     * Parameters allow to load an _optional_ Super Resolution DNN model for better quality.
     * @param prototxt_path prototxt file path for the super resolution model
     * @param model_path model file path for the super resolution model
     */
    CV_WRAP BarcodeDetector(CV_WRAP_FILE_PATH const std::string &prototxt_path, CV_WRAP_FILE_PATH const std::string &model_path);
    ~BarcodeDetector();

    /** @brief Decodes barcode in image once it's found by the detect() method.
     *
     * @param img grayscale or color (BGR) image containing bar code.
     * @param points vector of rotated rectangle vertices found by detect() method (or some other algorithm).
     * For N detected barcodes, the dimensions of this array should be [N][4].
     * Order of four points in vector<Point2f> is bottomLeft, topLeft, topRight, bottomRight.
     * @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded.
     * @param decoded_type vector of strings, specifies the type of these barcodes
     * @return true if at least one valid barcode has been found
     */
    CV_WRAP bool decodeWithType(InputArray img,
                                InputArray points,
                                CV_OUT std::vector<std::string> &decoded_info,
                                CV_OUT std::vector<std::string> &decoded_type) const;

    /** @brief Both detects and decodes barcode
     *
     * @param img grayscale or color (BGR) image containing barcode.
     * @param decoded_info UTF8-encoded output vector of string(s) or empty vector of string if the codes cannot be decoded.
     * @param decoded_type vector of strings, specifies the type of these barcodes
     * @param points optional output vector of vertices of the found barcode rectangle. Will be empty if not found.
     * @return true if at least one valid barcode has been found
     */
    CV_WRAP bool detectAndDecodeWithType(InputArray img,
                                         CV_OUT std::vector<std::string> &decoded_info,
                                         CV_OUT std::vector<std::string> &decoded_type,
                                         OutputArray points = noArray()) const;

    /** @brief Get detector downsampling threshold.
     *
     * @return detector downsampling threshold
     */
    CV_WRAP double getDownsamplingThreshold() const;

    /** @brief Set detector downsampling threshold.
     *
     * By default, the detect method resizes the input image to this limit if the smallest image size is greater than the threshold.
     * Increasing this value can improve detection accuracy and the number of results at the expense of performance.
     * Correlates with detector scales. Setting this to a large value will disable downsampling.
     * @param thresh downsampling limit to apply (default 512)
     * @see setDetectorScales
     */
    CV_WRAP BarcodeDetector& setDownsamplingThreshold(double thresh);

    /** @brief Returns detector box filter sizes.
     *
     * @param sizes output parameter for returning the sizes.
     */
    CV_WRAP void getDetectorScales(CV_OUT std::vector<float>& sizes) const;

    /** @brief Set detector box filter sizes.
     *
     * Adjusts the value and the number of box filters used in the detect step.
     * The filter sizes directly correlate with the expected line widths for a barcode. Corresponds to expected barcode distance.
     * If the downsampling limit is increased, filter sizes need to be adjusted in an inversely proportional way.
     * @param sizes box filter sizes, relative to minimum dimension of the image (default [0.01, 0.03, 0.06, 0.08])
     */
    CV_WRAP BarcodeDetector& setDetectorScales(const std::vector<float>& sizes);

    /** @brief Get detector gradient magnitude threshold.
     *
     * @return detector gradient magnitude threshold.
     */
    CV_WRAP double getGradientThreshold() const;

    /** @brief Set detector gradient magnitude threshold.
     *
     * Sets the coherence threshold for detected bounding boxes.
     * Increasing this value will generate a closer fitted bounding box width and can reduce false-positives.
     * Values between 16 and 1024 generally work, while too high of a value will remove valid detections.
     * @param thresh gradient magnitude threshold (default 64).
     */
    CV_WRAP BarcodeDetector& setGradientThreshold(double thresh);
};
//! @}
}} // cv::barcode::
#endif // OPENCV_OBJDETECT_BARCODE_HPP

View File

@@ -1,161 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_CHARUCO_DETECTOR_HPP
#define OPENCV_OBJDETECT_CHARUCO_DETECTOR_HPP
#include "opencv2/objdetect/aruco_detector.hpp"
namespace cv {
namespace aruco {
//! @addtogroup objdetect_aruco
//! @{
/** @brief Parameters controlling ChArUco corner interpolation, used by CharucoDetector. */
struct CV_EXPORTS_W_SIMPLE CharucoParameters {
    CV_WRAP CharucoParameters() {
        minMarkers = 2;
        tryRefineMarkers = false;
        checkMarkers = true;
    }
    /// optional 3x3 floating-point camera matrix
    CV_PROP_RW Mat cameraMatrix;
    /// optional vector of distortion coefficients
    CV_PROP_RW Mat distCoeffs;
    /// number of adjacent markers that must be detected to return a charuco corner, default = 2
    CV_PROP_RW int minMarkers;
    /// try to refine the detected markers (presumably using the known board layout — TODO confirm), default false
    CV_PROP_RW bool tryRefineMarkers;
    /// run check to verify that markers belong to the same board, default true
    CV_PROP_RW bool checkMarkers;
};
class CV_EXPORTS_W CharucoDetector : public Algorithm {
public:
    /** @brief Basic CharucoDetector constructor
     *
     * @param board ChAruco board
     * @param charucoParams charuco detection parameters
     * @param detectorParams marker detection parameters
     * @param refineParams marker refine detection parameters
     */
    CV_WRAP CharucoDetector(const CharucoBoard& board,
                            const CharucoParameters& charucoParams = CharucoParameters(),
                            const DetectorParameters &detectorParams = DetectorParameters(),
                            const RefineParameters& refineParams = RefineParameters());

    CV_WRAP const CharucoBoard& getBoard() const;
    CV_WRAP void setBoard(const CharucoBoard& board);

    CV_WRAP const CharucoParameters& getCharucoParameters() const;
    CV_WRAP void setCharucoParameters(CharucoParameters& charucoParameters);

    CV_WRAP const DetectorParameters& getDetectorParameters() const;
    CV_WRAP void setDetectorParameters(const DetectorParameters& detectorParameters);

    CV_WRAP const RefineParameters& getRefineParameters() const;
    CV_WRAP void setRefineParameters(const RefineParameters& refineParameters);

    /**
     * @brief detect aruco markers and interpolate the position of ChArUco board corners
     * @param image input image necessary for corner refinement. Note that markers are not detected here
     * when corners and ids are supplied via the markerCorners and markerIds parameters.
     * @param charucoCorners interpolated chessboard corners.
     * @param charucoIds interpolated chessboard corners identifiers.
     * @param markerCorners vector of already detected markers corners. For each marker, its four
     * corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
     * dimensions of this array should be Nx4. The order of the corners should be clockwise.
     * If markerCorners and markerIds are empty, the function detects aruco markers and ids.
     * @param markerIds list of identifiers for each marker in corners.
     * If markerCorners and markerIds are empty, the function detects aruco markers and ids.
     *
     * This function receives the detected markers and returns the 2D position of the chessboard corners
     * from a ChArUco board using the detected Aruco markers.
     *
     * If markerCorners and markerIds are empty, detectMarkers() will run and detect aruco markers and ids.
     *
     * If camera parameters are provided, the process is based on an approximated pose estimation, else it is based on local homography.
     * Only visible corners are returned. For each corner, its corresponding identifier is also returned in charucoIds.
     * @sa findChessboardCorners
     * @note After OpenCV 4.6.0, there was an incompatible change in the ChArUco pattern generation algorithm for even row counts.
     * Use cv::aruco::CharucoBoard::setLegacyPattern() to ensure compatibility with patterns created using OpenCV versions prior to 4.6.0.
     * For more information, see the issue: https://github.com/opencv/opencv/issues/23152
     */
    CV_WRAP void detectBoard(InputArray image, OutputArray charucoCorners, OutputArray charucoIds,
                             InputOutputArrayOfArrays markerCorners = noArray(),
                             InputOutputArray markerIds = noArray()) const;

    /**
     * @brief Detect ChArUco Diamond markers
     *
     * @param image input image necessary for corner subpixel refinement.
     * @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
     * is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
     * format to the corners returned by detectMarkers (e.g std::vector<std::vector<cv::Point2f> > ).
     * @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
     * type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
     * diamond.
     * @param markerCorners list of detected marker corners from detectMarkers function.
     * If markerCorners and markerIds are empty, the function detects aruco markers and ids.
     * @param markerIds list of marker ids in markerCorners.
     * If markerCorners and markerIds are empty, the function detects aruco markers and ids.
     *
     * This function detects Diamond markers from the previously detected ArUco markers. The diamonds
     * are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
     * are provided, the diamond search is based on reprojection. If not, diamond search is based on
     * homography. Homography is faster than reprojection, but less accurate.
     */
    CV_WRAP void detectDiamonds(InputArray image, OutputArrayOfArrays diamondCorners, OutputArray diamondIds,
                                InputOutputArrayOfArrays markerCorners = noArray(),
                                InputOutputArray markerIds = noArray()) const;

protected:
    // Pimpl: implementation details are hidden behind this opaque struct.
    struct CharucoDetectorImpl;
    Ptr<CharucoDetectorImpl> charucoDetectorImpl;
};
/**
 * @brief Draws a set of Charuco corners
 * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
 * altered.
 * @param charucoCorners vector of detected charuco corners
 * @param charucoIds list of identifiers for each corner in charucoCorners
 * @param cornerColor color of the square surrounding each corner
 *
 * This function draws a set of detected Charuco corners. If the identifiers vector is provided,
 * it also draws the id of each corner.
 */
CV_EXPORTS_W void drawDetectedCornersCharuco(InputOutputArray image, InputArray charucoCorners,
                                             InputArray charucoIds = noArray(), Scalar cornerColor = Scalar(255, 0, 0));
/**
 * @brief Draw a set of detected ChArUco Diamond markers
 *
 * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
 * altered.
 * @param diamondCorners positions of diamond corners in the same format returned by
 * detectCharucoDiamond(). (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
 * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
 * @param diamondIds vector of identifiers for diamonds in diamondCorners, in the same format
 * returned by detectCharucoDiamond() (e.g. std::vector<Vec4i>).
 * Optional, if not provided, ids are not painted.
 * @param borderColor color of marker borders. Rest of colors (text color and first corner color)
 * are calculated based on this one.
 *
 * Given an array of detected diamonds, this function draws them in the image. The marker borders
 * are painted, as well as the marker identifiers if provided.
 * Useful for debugging purposes.
 */
CV_EXPORTS_W void drawDetectedDiamonds(InputOutputArray image, InputArrayOfArrays diamondCorners,
                                       InputArray diamondIds = noArray(),
                                       Scalar borderColor = Scalar(0, 0, 255));
//! @}
}
}
#endif

View File

@@ -1,222 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_OBJDETECT_DBT_HPP
#define OPENCV_OBJDETECT_DBT_HPP
#include <opencv2/core.hpp>
#include <vector>
namespace cv
{
//! @addtogroup objdetect_cascade_classifier
//! @{
/** @brief Tracking-by-detection of multiple objects.
 *
 * Combines a main detector (run on whole frames) with a tracking detector that re-detects
 * each tracked object in a small region around its last known position (see detectInRegion).
 * NOTE(review): the friend workcycleObjectDetectorFunction and SeparateDetectionWork suggest
 * the main detector runs in a separate worker thread — confirm against the implementation.
 */
class CV_EXPORTS DetectionBasedTracker
{
public:
    /// User-tunable parameters of the tracker.
    struct CV_EXPORTS Parameters
    {
        /// Maximum lifetime of a track that is not being confirmed by detections
        /// (presumably counted in processed frames — TODO confirm units).
        int maxTrackLifetime;
        int minDetectionPeriod; //the minimal time between run of the big object detector (on the whole frame) in ms (1000 mean 1 sec), default=0
        Parameters();
    };

    /// Abstract interface for an object detector (e.g. a cascade classifier),
    /// used both as the main full-frame detector and as the per-region tracking detector.
    class IDetector
    {
    public:
        IDetector():
            minObjSize(96, 96),
            maxObjSize(INT_MAX, INT_MAX),
            minNeighbours(2),
            scaleFactor(1.1f)
        {}

        /// Runs detection on image and returns the found bounding boxes in objects.
        virtual void detect(const cv::Mat& image, std::vector<cv::Rect>& objects) = 0;

        /// Sets the minimum size of objects to search for.
        void setMinObjectSize(const cv::Size& min)
        {
            minObjSize = min;
        }
        /// Sets the maximum size of objects to search for.
        void setMaxObjectSize(const cv::Size& max)
        {
            maxObjSize = max;
        }
        cv::Size getMinObjectSize() const
        {
            return minObjSize;
        }
        cv::Size getMaxObjectSize() const
        {
            return maxObjSize;
        }
        float getScaleFactor()
        {
            return scaleFactor;
        }
        void setScaleFactor(float value)
        {
            scaleFactor = value;
        }
        int getMinNeighbours()
        {
            return minNeighbours;
        }
        void setMinNeighbours(int value)
        {
            minNeighbours = value;
        }
        virtual ~IDetector() {}

    protected:
        cv::Size minObjSize;  ///< minimum object size to search for
        cv::Size maxObjSize;  ///< maximum object size to search for
        int minNeighbours;    ///< detection grouping threshold (as in cascade classifiers)
        float scaleFactor;    ///< image pyramid scale factor used by the detector
    };

    /** @brief Constructor.
     * @param mainDetector detector run on whole frames to (re)discover objects
     * @param trackingDetector detector run on small regions to confirm tracked objects
     * @param params tracker parameters
     */
    DetectionBasedTracker(cv::Ptr<IDetector> mainDetector, cv::Ptr<IDetector> trackingDetector, const Parameters& params);
    virtual ~DetectionBasedTracker();

    /// Starts the tracker (returns false on failure — TODO confirm failure conditions).
    virtual bool run();
    /// Stops the tracker.
    virtual void stop();
    /// Drops all currently tracked objects.
    virtual void resetTracking();

    /// Processes the next grayscale frame and updates the tracked objects.
    virtual void process(const cv::Mat& imageGray);

    bool setParameters(const Parameters& params);
    const Parameters& getParameters() const;

    /// A tracked object as (location, id).
    typedef std::pair<cv::Rect, int> Object;
    /// Returns the current object locations.
    virtual void getObjects(std::vector<cv::Rect>& result) const;
    /// Returns the current objects as (location, id) pairs.
    virtual void getObjects(std::vector<Object>& result) const;

    /// Lifecycle state of a tracked object.
    enum ObjectStatus
    {
        DETECTED_NOT_SHOWN_YET,   ///< detected, but not yet reported to the user
        DETECTED,                 ///< detected in the current frame
        DETECTED_TEMPORARY_LOST,  ///< not detected recently, still tracked
        WRONG_OBJECT              ///< rejected object
    };
    /// Extended object info: id, location and status.
    struct ExtObject
    {
        int id;
        cv::Rect location;
        ObjectStatus status;
        ExtObject(int _id, cv::Rect _location, ObjectStatus _status)
            :id(_id), location(_location), status(_status)
        {
        }
    };
    /// Returns the current objects with their extended status.
    virtual void getObjects(std::vector<ExtObject>& result) const;

    virtual int addObject(const cv::Rect& location); //returns id of the new object

protected:
    // Opaque worker that runs the main (full-frame) detector.
    class SeparateDetectionWork;
    cv::Ptr<SeparateDetectionWork> separateDetectionWork;
    friend void* workcycleObjectDetectorFunction(void* p);

    /// Internal tuning constants (initialized in the implementation).
    struct InnerParameters
    {
        int numLastPositionsToTrack;
        int numStepsToWaitBeforeFirstShow;
        int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown;
        int numStepsToShowWithoutDetecting;
        float coeffTrackingWindowSize;
        float coeffObjectSizeToTrack;
        float coeffObjectSpeedUsingInPrediction;
        InnerParameters();
    };
    Parameters parameters;
    InnerParameters innerParameters;

    /// Per-object track state: recent positions and detection counters.
    struct TrackedObject
    {
        typedef std::vector<cv::Rect> PositionsVector;
        PositionsVector lastPositions;  ///< most recent known positions
        int numDetectedFrames;          ///< frames in which the object was detected
        int numFramesNotDetected;       ///< consecutive frames without a detection
        int id;                         ///< unique track id
        TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0)
        {
            lastPositions.push_back(rect);
            id=getNextId();
        }
        // Process-wide monotonically increasing id counter (not thread-safe by itself).
        static int getNextId()
        {
            static int _id=0;
            return _id++;
        }
    };
    int numTrackedSteps;
    std::vector<TrackedObject> trackedObjects;
    std::vector<float> weightsPositionsSmoothing;  ///< smoothing weights for reported positions
    std::vector<float> weightsSizesSmoothing;      ///< smoothing weights for reported sizes
    cv::Ptr<IDetector> cascadeForTracking;         ///< detector used for per-region re-detection

    void updateTrackedObjects(const std::vector<cv::Rect>& detectedObjects);
    cv::Rect calcTrackedObjectPositionToShow(int i) const;
    cv::Rect calcTrackedObjectPositionToShow(int i, ObjectStatus& status) const;
    /// Runs the tracking detector inside region r of img only.
    void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector<cv::Rect>& detectedObjectsInRegions);
};
//! @}
} //end of cv namespace
#endif

View File

@@ -1,179 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_OBJDETECT_FACE_HPP
#define OPENCV_OBJDETECT_FACE_HPP
#include <opencv2/core.hpp>
namespace cv
{
//! @addtogroup objdetect_dnn_face
//! @{
/** @brief DNN-based face detector
 *
 * model download link: https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet
 */
class CV_EXPORTS_W FaceDetectorYN
{
public:
    virtual ~FaceDetectorYN() {}

    /** @brief Set the size for the network input, which overwrites the input size of creating model.
     * Call this method when the size of the input image does not match the input size when creating the model.
     *
     * @param input_size the size of the input image
     */
    CV_WRAP virtual void setInputSize(const Size& input_size) = 0;
    CV_WRAP virtual Size getInputSize() = 0;

    /** @brief Set the score threshold to filter out bounding boxes of score less than the given value
     *
     * @param score_threshold threshold for filtering out bounding boxes
     */
    CV_WRAP virtual void setScoreThreshold(float score_threshold) = 0;
    CV_WRAP virtual float getScoreThreshold() = 0;

    /** @brief Set the Non-maximum-suppression threshold to suppress bounding boxes that have IoU greater than the given value
     *
     * @param nms_threshold threshold for NMS operation
     */
    CV_WRAP virtual void setNMSThreshold(float nms_threshold) = 0;
    CV_WRAP virtual float getNMSThreshold() = 0;

    /** @brief Set the number of bounding boxes preserved before NMS
     *
     * @param top_k the number of bounding boxes to preserve from top rank based on score
     */
    CV_WRAP virtual void setTopK(int top_k) = 0;
    CV_WRAP virtual int getTopK() = 0;

    /** @brief Detects faces in the input image. Following is an example output.
     * ![image](pics/lena-face-detection.jpg)
     * @param image an image to detect
     * @param faces detection results stored in a 2D cv::Mat of shape [num_faces, 15]
     *  - 0-1: x, y of bbox top left corner
     *  - 2-3: width, height of bbox
     *  - 4-5: x, y of right eye (blue point in the example image)
     *  - 6-7: x, y of left eye (red point in the example image)
     *  - 8-9: x, y of nose tip (green point in the example image)
     *  - 10-11: x, y of right corner of mouth (pink point in the example image)
     *  - 12-13: x, y of left corner of mouth (yellow point in the example image)
     *  - 14: face score
     */
    CV_WRAP virtual int detect(InputArray image, OutputArray faces) = 0;

    /** @brief Creates an instance of face detector class with given parameters
     *
     * @param model the path to the requested model
     * @param config the path to the config file for compatibility, which is not requested for ONNX models
     * @param input_size the size of the input image
     * @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
     * @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
     * @param top_k keep top K bboxes before NMS
     * @param backend_id the id of backend
     * @param target_id the id of target device
     */
    CV_WRAP static Ptr<FaceDetectorYN> create(CV_WRAP_FILE_PATH const String& model,
                                              CV_WRAP_FILE_PATH const String& config,
                                              const Size& input_size,
                                              float score_threshold = 0.9f,
                                              float nms_threshold = 0.3f,
                                              int top_k = 5000,
                                              int backend_id = 0,
                                              int target_id = 0);

    /** @overload
     *
     * @param framework Name of origin framework
     * @param bufferModel A buffer with a content of binary file with weights
     * @param bufferConfig A buffer with a content of text file contains network configuration
     * @param input_size the size of the input image
     * @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
     * @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
     * @param top_k keep top K bboxes before NMS
     * @param backend_id the id of backend
     * @param target_id the id of target device
     */
    CV_WRAP static Ptr<FaceDetectorYN> create(const String& framework,
                                              const std::vector<uchar>& bufferModel,
                                              const std::vector<uchar>& bufferConfig,
                                              const Size& input_size,
                                              float score_threshold = 0.9f,
                                              float nms_threshold = 0.3f,
                                              int top_k = 5000,
                                              int backend_id = 0,
                                              int target_id = 0);
};
/** @brief DNN-based face recognizer
 *
 * model download link: https://github.com/opencv/opencv_zoo/tree/master/models/face_recognition_sface
 */
class CV_EXPORTS_W FaceRecognizerSF
{
public:
    virtual ~FaceRecognizerSF() {}

    /** @brief Definition of distance used for calculating the distance between two face features
     */
    enum DisType { FR_COSINE=0, FR_NORM_L2=1 };

    /** @brief Aligns detected face with the source input image and crops it
     * @param src_img input image
     * @param face_box the detected face result from the input image
     * @param aligned_img output aligned image
     */
    CV_WRAP virtual void alignCrop(InputArray src_img, InputArray face_box, OutputArray aligned_img) const = 0;

    /** @brief Extracts face feature from aligned image
     * @param aligned_img input aligned image
     * @param face_feature output face feature
     */
    CV_WRAP virtual void feature(InputArray aligned_img, OutputArray face_feature) = 0;

    /** @brief Calculates the distance between two face features
     * @param face_feature1 the first input feature
     * @param face_feature2 the second input feature of the same size and the same type as face_feature1
     * @param dis_type defines how to calculate the distance between two face features with optional values "FR_COSINE" or "FR_NORM_L2"
     */
    CV_WRAP virtual double match(InputArray face_feature1, InputArray face_feature2, int dis_type = FaceRecognizerSF::FR_COSINE) const = 0;

    /** @brief Creates an instance of this class with given parameters
     * @param model the path of the onnx model used for face recognition
     * @param config the path to the config file for compatibility, which is not requested for ONNX models
     * @param backend_id the id of backend
     * @param target_id the id of target device
     */
    CV_WRAP static Ptr<FaceRecognizerSF> create(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config, int backend_id = 0, int target_id = 0);

    /**
     * @brief Creates an instance of this class from a buffer containing the model weights and configuration.
     * @param framework Name of the framework (ONNX, etc.)
     * @param bufferModel A buffer containing the binary model weights.
     * @param bufferConfig A buffer containing the network configuration.
     * @param backend_id The id of the backend.
     * @param target_id The id of the target device.
     *
     * @return A pointer to the created instance of FaceRecognizerSF.
     */
    CV_WRAP static Ptr<FaceRecognizerSF> create(const String& framework,
                                                const std::vector<uchar>& bufferModel,
                                                const std::vector<uchar>& bufferConfig,
                                                int backend_id = 0,
                                                int target_id = 0);
};
//! @}
} // namespace cv
#endif

View File

@@ -1,96 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_GRAPHICAL_CODE_DETECTOR_HPP
#define OPENCV_OBJDETECT_GRAPHICAL_CODE_DETECTOR_HPP
#include <opencv2/core.hpp>
namespace cv {
//! @addtogroup objdetect_common
//! @{
/** @brief Common interface for detectors of graphical codes (e.g. QR codes, bar codes).
 *
 * Concrete detectors share a pimpl (@ref Impl) so instances are cheap to copy and move.
 */
class CV_EXPORTS_W_SIMPLE GraphicalCodeDetector {
public:
CV_DEPRECATED_EXTERNAL // avoid using in C++ code, will be moved to "protected" (need to fix bindings first)
GraphicalCodeDetector();
GraphicalCodeDetector(const GraphicalCodeDetector&) = default;
GraphicalCodeDetector(GraphicalCodeDetector&&) = default;
GraphicalCodeDetector& operator=(const GraphicalCodeDetector&) = default;
GraphicalCodeDetector& operator=(GraphicalCodeDetector&&) = default;
/** @brief Detects a graphical code in the image and returns the quadrangle containing the code.
@param img grayscale or color (BGR) image containing (or not) a graphical code.
@param points Output vector of vertices of the minimum-area quadrangle containing the code.
@return true if a code was found.
*/
CV_WRAP bool detect(InputArray img, OutputArray points) const;
/** @brief Decodes a graphical code in the image once it has been found by the detect() method.
Returns a UTF8-encoded output string, or an empty string if the code cannot be decoded.
@param img grayscale or color (BGR) image containing the graphical code.
@param points Quadrangle vertices found by the detect() method (or some other algorithm).
@param straight_code The optional output image containing the binarized code, will be empty if not found.
*/
CV_WRAP std::string decode(InputArray img, InputArray points, OutputArray straight_code = noArray()) const;
/** @brief Both detects and decodes a graphical code.
@param img grayscale or color (BGR) image containing the graphical code.
@param points optional output array of vertices of the found graphical code quadrangle, will be empty if not found.
@param straight_code The optional output image containing the binarized code.
*/
CV_WRAP std::string detectAndDecode(InputArray img, OutputArray points = noArray(),
                                    OutputArray straight_code = noArray()) const;
/** @brief Detects graphical codes in the image and returns the vector of quadrangles containing the codes.
@param img grayscale or color (BGR) image containing (or not) graphical codes.
@param points Output vector of vectors of vertices of the minimum-area quadrangles containing the codes.
@return true if at least one code was found.
*/
CV_WRAP bool detectMulti(InputArray img, OutputArray points) const;
/** @brief Decodes graphical codes in the image once they have been found by the detectMulti() method.
@param img grayscale or color (BGR) image containing graphical codes.
@param decoded_info UTF8-encoded output vector of strings; a string is empty if the corresponding code cannot be decoded.
@param points vector of quadrangle vertices found by the detectMulti() method (or some other algorithm).
@param straight_code The optional output vector of images containing the binarized codes.
@return true if at least one code was decoded.
*/
CV_WRAP bool decodeMulti(InputArray img, InputArray points, CV_OUT std::vector<std::string>& decoded_info,
                         OutputArrayOfArrays straight_code = noArray()) const;
/** @brief Both detects and decodes graphical codes.
@param img grayscale or color (BGR) image containing graphical codes.
@param decoded_info UTF8-encoded output vector of strings; a string is empty if the corresponding code cannot be decoded.
@param points optional output vector of vertices of the found graphical code quadrangles. Will be empty if not found.
@param straight_code The optional vector of images containing the binarized codes.
- If there are QR codes encoded with a Structured Append mode on the image and all of them detected and decoded correctly,
method writes a full message to position corresponds to 0-th code in a sequence. The rest of QR codes from the same sequence
have empty string.
@return true if at least one code was decoded.
*/
CV_WRAP bool detectAndDecodeMulti(InputArray img, CV_OUT std::vector<std::string>& decoded_info, OutputArray points = noArray(),
                                  OutputArrayOfArrays straight_code = noArray()) const;
#ifdef OPENCV_BINDINGS_PARSER
// Byte-array variants exposed only to the bindings generator: identical behavior,
// but the decoded payload is returned as raw bytes instead of a std::string.
CV_WRAP_AS(detectAndDecodeBytes) NativeByteArray detectAndDecode(InputArray img, OutputArray points = noArray(),
                                    OutputArray straight_code = noArray()) const;
CV_WRAP_AS(decodeBytes) NativeByteArray decode(InputArray img, InputArray points, OutputArray straight_code = noArray()) const;
CV_WRAP_AS(decodeBytesMulti) bool decodeMulti(InputArray img, InputArray points, CV_OUT std::vector<NativeByteArray>& decoded_info,
                         OutputArrayOfArrays straight_code = noArray()) const;
CV_WRAP_AS(detectAndDecodeBytesMulti) bool detectAndDecodeMulti(InputArray img, CV_OUT std::vector<NativeByteArray>& decoded_info, OutputArray points = noArray(),
                                  OutputArrayOfArrays straight_code = noArray()) const;
#endif
struct Impl;
protected:
// Shared implementation (pimpl); copies of this object share the same Impl.
Ptr<Impl> p;
};
//! @}
}
#endif

View File

@@ -1,48 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Compatibility shim for external code: presumably kept so that legacy include
// paths keep working by forwarding to the consolidated objdetect header — TODO confirm.
// Including it from inside the OpenCV build itself is a hard error.
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/objdetect.hpp"