core opencv framework added

sangeetha 2024-07-31 15:10:54 +05:30
parent bb98376422
commit b1ccbdce13
884 changed files with 379946 additions and 0 deletions

View File

@@ -0,0 +1 @@
Versions/Current/Headers

View File

@@ -0,0 +1 @@
Versions/Current/Modules

View File

@@ -0,0 +1 @@
Versions/Current/Resources

View File

@@ -0,0 +1,303 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
#import "KAZE.h"
// C++: enum DescriptorType (cv.AKAZE.DescriptorType)
typedef NS_ENUM(int, DescriptorType) {
AKAZE_DESCRIPTOR_KAZE_UPRIGHT NS_SWIFT_NAME(DESCRIPTOR_KAZE_UPRIGHT) = 2,
AKAZE_DESCRIPTOR_KAZE NS_SWIFT_NAME(DESCRIPTOR_KAZE) = 3,
AKAZE_DESCRIPTOR_MLDB_UPRIGHT NS_SWIFT_NAME(DESCRIPTOR_MLDB_UPRIGHT) = 4,
AKAZE_DESCRIPTOR_MLDB NS_SWIFT_NAME(DESCRIPTOR_MLDB) = 5
};
NS_ASSUME_NONNULL_BEGIN
// C++: class AKAZE
/**
* Class implementing the AKAZE keypoint detector and descriptor extractor, described in CITE: ANB13.
*
* AKAZE descriptors can only be used with KAZE or AKAZE keypoints. This class is thread-safe.
*
* NOTE: When you need descriptors, use Feature2D::detectAndCompute, which
* provides better performance. When using Feature2D::detect followed by
* Feature2D::compute, the scale space pyramid is computed twice.
*
* NOTE: AKAZE implements the T-API. When the image is passed as a UMat, some parts of the
* algorithm will use OpenCL.
*
* NOTE: [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear
* Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In
* British Machine Vision Conference (BMVC), Bristol, UK, September 2013.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface AKAZE : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::AKAZE> nativePtrAKAZE;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::AKAZE>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::AKAZE>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_AKAZE cv::AKAZE::create(AKAZE_DescriptorType descriptor_type = AKAZE::DESCRIPTOR_MLDB, int descriptor_size = 0, int descriptor_channels = 3, float threshold = 0.001f, int nOctaves = 4, int nOctaveLayers = 4, KAZE_DiffusivityType diffusivity = KAZE::DIFF_PM_G2, int max_points = -1)
//
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
* @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
* @param threshold Detector response threshold to accept point
* @param nOctaves Maximum octave evolution of the image
* @param nOctaveLayers Default number of sublevels per scale level
* @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
* DIFF_CHARBONNIER
* @param max_points Maximum number of returned points. If the image contains
* more features, the features with the highest response are returned.
* A negative value means no limit.
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size descriptor_channels:(int)descriptor_channels threshold:(float)threshold nOctaves:(int)nOctaves nOctaveLayers:(int)nOctaveLayers diffusivity:(DiffusivityType)diffusivity max_points:(int)max_points NS_SWIFT_NAME(create(descriptor_type:descriptor_size:descriptor_channels:threshold:nOctaves:nOctaveLayers:diffusivity:max_points:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
* @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
* @param threshold Detector response threshold to accept point
* @param nOctaves Maximum octave evolution of the image
* @param nOctaveLayers Default number of sublevels per scale level
* @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
* DIFF_CHARBONNIER
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size descriptor_channels:(int)descriptor_channels threshold:(float)threshold nOctaves:(int)nOctaves nOctaveLayers:(int)nOctaveLayers diffusivity:(DiffusivityType)diffusivity NS_SWIFT_NAME(create(descriptor_type:descriptor_size:descriptor_channels:threshold:nOctaves:nOctaveLayers:diffusivity:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
* @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
* @param threshold Detector response threshold to accept point
* @param nOctaves Maximum octave evolution of the image
* @param nOctaveLayers Default number of sublevels per scale level
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size descriptor_channels:(int)descriptor_channels threshold:(float)threshold nOctaves:(int)nOctaves nOctaveLayers:(int)nOctaveLayers NS_SWIFT_NAME(create(descriptor_type:descriptor_size:descriptor_channels:threshold:nOctaves:nOctaveLayers:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
* @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
* @param threshold Detector response threshold to accept point
* @param nOctaves Maximum octave evolution of the image
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size descriptor_channels:(int)descriptor_channels threshold:(float)threshold nOctaves:(int)nOctaves NS_SWIFT_NAME(create(descriptor_type:descriptor_size:descriptor_channels:threshold:nOctaves:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
* @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
* @param threshold Detector response threshold to accept point
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size descriptor_channels:(int)descriptor_channels threshold:(float)threshold NS_SWIFT_NAME(create(descriptor_type:descriptor_size:descriptor_channels:threshold:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
* @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size descriptor_channels:(int)descriptor_channels NS_SWIFT_NAME(create(descriptor_type:descriptor_size:descriptor_channels:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
* @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type descriptor_size:(int)descriptor_size NS_SWIFT_NAME(create(descriptor_type:descriptor_size:));
/**
* The AKAZE constructor
*
* @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
* DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
*/
+ (AKAZE*)create:(DescriptorType)descriptor_type NS_SWIFT_NAME(create(descriptor_type:));
/**
* The AKAZE constructor
*/
+ (AKAZE*)create NS_SWIFT_NAME(create());
//
// void cv::AKAZE::setDescriptorType(AKAZE_DescriptorType dtype)
//
- (void)setDescriptorType:(DescriptorType)dtype NS_SWIFT_NAME(setDescriptorType(dtype:));
//
// AKAZE_DescriptorType cv::AKAZE::getDescriptorType()
//
- (DescriptorType)getDescriptorType NS_SWIFT_NAME(getDescriptorType());
//
// void cv::AKAZE::setDescriptorSize(int dsize)
//
- (void)setDescriptorSize:(int)dsize NS_SWIFT_NAME(setDescriptorSize(dsize:));
//
// int cv::AKAZE::getDescriptorSize()
//
- (int)getDescriptorSize NS_SWIFT_NAME(getDescriptorSize());
//
// void cv::AKAZE::setDescriptorChannels(int dch)
//
- (void)setDescriptorChannels:(int)dch NS_SWIFT_NAME(setDescriptorChannels(dch:));
//
// int cv::AKAZE::getDescriptorChannels()
//
- (int)getDescriptorChannels NS_SWIFT_NAME(getDescriptorChannels());
//
// void cv::AKAZE::setThreshold(double threshold)
//
- (void)setThreshold:(double)threshold NS_SWIFT_NAME(setThreshold(threshold:));
//
// double cv::AKAZE::getThreshold()
//
- (double)getThreshold NS_SWIFT_NAME(getThreshold());
//
// void cv::AKAZE::setNOctaves(int octaves)
//
- (void)setNOctaves:(int)octaves NS_SWIFT_NAME(setNOctaves(octaves:));
//
// int cv::AKAZE::getNOctaves()
//
- (int)getNOctaves NS_SWIFT_NAME(getNOctaves());
//
// void cv::AKAZE::setNOctaveLayers(int octaveLayers)
//
- (void)setNOctaveLayers:(int)octaveLayers NS_SWIFT_NAME(setNOctaveLayers(octaveLayers:));
//
// int cv::AKAZE::getNOctaveLayers()
//
- (int)getNOctaveLayers NS_SWIFT_NAME(getNOctaveLayers());
//
// void cv::AKAZE::setDiffusivity(KAZE_DiffusivityType diff)
//
- (void)setDiffusivity:(DiffusivityType)diff NS_SWIFT_NAME(setDiffusivity(diff:));
//
// KAZE_DiffusivityType cv::AKAZE::getDiffusivity()
//
- (DiffusivityType)getDiffusivity NS_SWIFT_NAME(getDiffusivity());
//
// String cv::AKAZE::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
//
// void cv::AKAZE::setMaxPoints(int max_points)
//
- (void)setMaxPoints:(int)max_points NS_SWIFT_NAME(setMaxPoints(max_points:));
//
// int cv::AKAZE::getMaxPoints()
//
- (int)getMaxPoints NS_SWIFT_NAME(getMaxPoints());
@end
NS_ASSUME_NONNULL_END
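A minimal usage sketch for the AKAZE wrapper above (illustrative; the Mat and KeyPoint wrappers, Feature2D's detectAndCompute method and the CvType constants are assumed from elsewhere in these bindings):
Mat *gray = [Mat zeros:480 cols:640 type:[CvType CV_8UC1]];    // stand-in for a real grayscale frame; CV_8UC1 accessor assumed from CvType.h
AKAZE *akaze = [AKAZE create];                                  // defaults: DESCRIPTOR_MLDB, threshold 0.001f
NSMutableArray<KeyPoint*> *keypoints = [NSMutableArray array];
Mat *descriptors = [Mat new];
// Preferred over separate detect + compute, which would build the scale-space pyramid twice.
[akaze detectAndCompute:gray mask:[Mat new] keypoints:keypoints descriptors:descriptors];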

View File

@@ -0,0 +1,420 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ml.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "StatModel.h"
@class Mat;
@class TermCriteria;
// C++: enum ActivationFunctions (cv.ml.ANN_MLP.ActivationFunctions)
typedef NS_ENUM(int, ActivationFunctions) {
ANN_MLP_IDENTITY NS_SWIFT_NAME(IDENTITY) = 0,
ANN_MLP_SIGMOID_SYM NS_SWIFT_NAME(SIGMOID_SYM) = 1,
ANN_MLP_GAUSSIAN NS_SWIFT_NAME(GAUSSIAN) = 2,
ANN_MLP_RELU NS_SWIFT_NAME(RELU) = 3,
ANN_MLP_LEAKYRELU NS_SWIFT_NAME(LEAKYRELU) = 4
};
// C++: enum TrainFlags (cv.ml.ANN_MLP.TrainFlags)
typedef NS_ENUM(int, TrainFlags) {
ANN_MLP_UPDATE_WEIGHTS NS_SWIFT_NAME(UPDATE_WEIGHTS) = 1,
ANN_MLP_NO_INPUT_SCALE NS_SWIFT_NAME(NO_INPUT_SCALE) = 2,
ANN_MLP_NO_OUTPUT_SCALE NS_SWIFT_NAME(NO_OUTPUT_SCALE) = 4
};
// C++: enum TrainingMethods (cv.ml.ANN_MLP.TrainingMethods)
typedef NS_ENUM(int, TrainingMethods) {
ANN_MLP_BACKPROP NS_SWIFT_NAME(BACKPROP) = 0,
ANN_MLP_RPROP NS_SWIFT_NAME(RPROP) = 1,
ANN_MLP_ANNEAL NS_SWIFT_NAME(ANNEAL) = 2
};
NS_ASSUME_NONNULL_BEGIN
// C++: class ANN_MLP
/**
* Artificial Neural Networks - Multi-Layer Perceptrons.
*
* Unlike many other models in ML that are constructed and trained at once, in the MLP model these
* steps are separated. First, a network with the specified topology is created using the non-default
* constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is
* trained using a set of input and output vectors. The training procedure can be repeated more than
* once, that is, the weights can be adjusted based on the new training data.
*
* Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
*
* @see REF: ml_intro_ann
*
* Member of `Ml`
*/
CV_EXPORTS @interface ANN_MLP : StatModel
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ml::ANN_MLP> nativePtrANN_MLP;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ml::ANN_MLP>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ml::ANN_MLP>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ml::ANN_MLP::setTrainMethod(int method, double param1 = 0, double param2 = 0)
//
/**
* Sets training method and common parameters.
* @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
* @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL.
* @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP and to finalT for ANN_MLP::ANNEAL.
*/
- (void)setTrainMethod:(int)method param1:(double)param1 param2:(double)param2 NS_SWIFT_NAME(setTrainMethod(method:param1:param2:));
/**
* Sets training method and common parameters.
* @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
* @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL.
*/
- (void)setTrainMethod:(int)method param1:(double)param1 NS_SWIFT_NAME(setTrainMethod(method:param1:));
/**
* Sets training method and common parameters.
* @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
*/
- (void)setTrainMethod:(int)method NS_SWIFT_NAME(setTrainMethod(method:));
//
// int cv::ml::ANN_MLP::getTrainMethod()
//
/**
* Returns current training method
*/
- (int)getTrainMethod NS_SWIFT_NAME(getTrainMethod());
//
// void cv::ml::ANN_MLP::setActivationFunction(int type, double param1 = 0, double param2 = 0)
//
/**
* Initialize the activation function for each neuron.
* Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
* @param type The type of activation function. See ANN_MLP::ActivationFunctions.
* @param param1 The first parameter of the activation function, `$$\alpha$$`. Default value is 0.
* @param param2 The second parameter of the activation function, `$$\beta$$`. Default value is 0.
*/
- (void)setActivationFunction:(int)type param1:(double)param1 param2:(double)param2 NS_SWIFT_NAME(setActivationFunction(type:param1:param2:));
/**
* Initialize the activation function for each neuron.
* Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
* @param type The type of activation function. See ANN_MLP::ActivationFunctions.
* @param param1 The first parameter of the activation function, `$$\alpha$$`. Default value is 0.
*/
- (void)setActivationFunction:(int)type param1:(double)param1 NS_SWIFT_NAME(setActivationFunction(type:param1:));
/**
* Initialize the activation function for each neuron.
* Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
* @param type The type of activation function. See ANN_MLP::ActivationFunctions.
*/
- (void)setActivationFunction:(int)type NS_SWIFT_NAME(setActivationFunction(type:));
//
// void cv::ml::ANN_MLP::setLayerSizes(Mat _layer_sizes)
//
/**
* Integer vector specifying the number of neurons in each layer including the input and output layers.
* The very first element specifies the number of elements in the input layer.
* The last element specifies the number of elements in the output layer. The default value is an empty Mat.
* @see `-getLayerSizes:`
*/
- (void)setLayerSizes:(Mat*)_layer_sizes NS_SWIFT_NAME(setLayerSizes(_layer_sizes:));
//
// Mat cv::ml::ANN_MLP::getLayerSizes()
//
/**
* Integer vector specifying the number of neurons in each layer including the input and output layers.
* The very first element specifies the number of elements in the input layer.
* The last element specifies the number of elements in the output layer.
* @see `-setLayerSizes:`
*/
- (Mat*)getLayerSizes NS_SWIFT_NAME(getLayerSizes());
//
// TermCriteria cv::ml::ANN_MLP::getTermCriteria()
//
/**
* @see `-setTermCriteria:`
*/
- (TermCriteria*)getTermCriteria NS_SWIFT_NAME(getTermCriteria());
//
// void cv::ml::ANN_MLP::setTermCriteria(TermCriteria val)
//
/**
* getTermCriteria @see `-getTermCriteria:`
*/
- (void)setTermCriteria:(TermCriteria*)val NS_SWIFT_NAME(setTermCriteria(val:));
//
// double cv::ml::ANN_MLP::getBackpropWeightScale()
//
/**
* @see `-setBackpropWeightScale:`
*/
- (double)getBackpropWeightScale NS_SWIFT_NAME(getBackpropWeightScale());
//
// void cv::ml::ANN_MLP::setBackpropWeightScale(double val)
//
/**
* getBackpropWeightScale @see `-getBackpropWeightScale:`
*/
- (void)setBackpropWeightScale:(double)val NS_SWIFT_NAME(setBackpropWeightScale(val:));
//
// double cv::ml::ANN_MLP::getBackpropMomentumScale()
//
/**
* @see `-setBackpropMomentumScale:`
*/
- (double)getBackpropMomentumScale NS_SWIFT_NAME(getBackpropMomentumScale());
//
// void cv::ml::ANN_MLP::setBackpropMomentumScale(double val)
//
/**
* getBackpropMomentumScale @see `-getBackpropMomentumScale:`
*/
- (void)setBackpropMomentumScale:(double)val NS_SWIFT_NAME(setBackpropMomentumScale(val:));
//
// double cv::ml::ANN_MLP::getRpropDW0()
//
/**
* @see `-setRpropDW0:`
*/
- (double)getRpropDW0 NS_SWIFT_NAME(getRpropDW0());
//
// void cv::ml::ANN_MLP::setRpropDW0(double val)
//
/**
* getRpropDW0 @see `-getRpropDW0:`
*/
- (void)setRpropDW0:(double)val NS_SWIFT_NAME(setRpropDW0(val:));
//
// double cv::ml::ANN_MLP::getRpropDWPlus()
//
/**
* @see `-setRpropDWPlus:`
*/
- (double)getRpropDWPlus NS_SWIFT_NAME(getRpropDWPlus());
//
// void cv::ml::ANN_MLP::setRpropDWPlus(double val)
//
/**
* getRpropDWPlus @see `-getRpropDWPlus:`
*/
- (void)setRpropDWPlus:(double)val NS_SWIFT_NAME(setRpropDWPlus(val:));
//
// double cv::ml::ANN_MLP::getRpropDWMinus()
//
/**
* @see `-setRpropDWMinus:`
*/
- (double)getRpropDWMinus NS_SWIFT_NAME(getRpropDWMinus());
//
// void cv::ml::ANN_MLP::setRpropDWMinus(double val)
//
/**
* getRpropDWMinus @see `-getRpropDWMinus:`
*/
- (void)setRpropDWMinus:(double)val NS_SWIFT_NAME(setRpropDWMinus(val:));
//
// double cv::ml::ANN_MLP::getRpropDWMin()
//
/**
* @see `-setRpropDWMin:`
*/
- (double)getRpropDWMin NS_SWIFT_NAME(getRpropDWMin());
//
// void cv::ml::ANN_MLP::setRpropDWMin(double val)
//
/**
* getRpropDWMin @see `-getRpropDWMin:`
*/
- (void)setRpropDWMin:(double)val NS_SWIFT_NAME(setRpropDWMin(val:));
//
// double cv::ml::ANN_MLP::getRpropDWMax()
//
/**
* @see `-setRpropDWMax:`
*/
- (double)getRpropDWMax NS_SWIFT_NAME(getRpropDWMax());
//
// void cv::ml::ANN_MLP::setRpropDWMax(double val)
//
/**
* getRpropDWMax @see `-getRpropDWMax:`
*/
- (void)setRpropDWMax:(double)val NS_SWIFT_NAME(setRpropDWMax(val:));
//
// double cv::ml::ANN_MLP::getAnnealInitialT()
//
/**
* @see `-setAnnealInitialT:`
*/
- (double)getAnnealInitialT NS_SWIFT_NAME(getAnnealInitialT());
//
// void cv::ml::ANN_MLP::setAnnealInitialT(double val)
//
/**
* getAnnealInitialT @see `-getAnnealInitialT:`
*/
- (void)setAnnealInitialT:(double)val NS_SWIFT_NAME(setAnnealInitialT(val:));
//
// double cv::ml::ANN_MLP::getAnnealFinalT()
//
/**
* @see `-setAnnealFinalT:`
*/
- (double)getAnnealFinalT NS_SWIFT_NAME(getAnnealFinalT());
//
// void cv::ml::ANN_MLP::setAnnealFinalT(double val)
//
/**
* getAnnealFinalT @see `-getAnnealFinalT:`
*/
- (void)setAnnealFinalT:(double)val NS_SWIFT_NAME(setAnnealFinalT(val:));
//
// double cv::ml::ANN_MLP::getAnnealCoolingRatio()
//
/**
* @see `-setAnnealCoolingRatio:`
*/
- (double)getAnnealCoolingRatio NS_SWIFT_NAME(getAnnealCoolingRatio());
//
// void cv::ml::ANN_MLP::setAnnealCoolingRatio(double val)
//
/**
* getAnnealCoolingRatio @see `-getAnnealCoolingRatio:`
*/
- (void)setAnnealCoolingRatio:(double)val NS_SWIFT_NAME(setAnnealCoolingRatio(val:));
//
// int cv::ml::ANN_MLP::getAnnealItePerStep()
//
/**
* @see `-setAnnealItePerStep:`
*/
- (int)getAnnealItePerStep NS_SWIFT_NAME(getAnnealItePerStep());
//
// void cv::ml::ANN_MLP::setAnnealItePerStep(int val)
//
/**
* getAnnealItePerStep @see `-getAnnealItePerStep:`
*/
- (void)setAnnealItePerStep:(int)val NS_SWIFT_NAME(setAnnealItePerStep(val:));
//
// Mat cv::ml::ANN_MLP::getWeights(int layerIdx)
//
- (Mat*)getWeights:(int)layerIdx NS_SWIFT_NAME(getWeights(layerIdx:));
//
// static Ptr_ANN_MLP cv::ml::ANN_MLP::create()
//
/**
* Creates empty model
*
* Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model.
* Note that the train method has optional flags: ANN_MLP::TrainFlags.
*/
+ (ANN_MLP*)create NS_SWIFT_NAME(create());
//
// static Ptr_ANN_MLP cv::ml::ANN_MLP::load(String filepath)
//
/**
* Loads and creates a serialized ANN from a file
*
* Use ANN::save to serialize and store an ANN to disk.
* Load the ANN from this file again, by calling this function with the path to the file.
*
* @param filepath path to serialized ANN
*/
+ (ANN_MLP*)load:(NSString*)filepath NS_SWIFT_NAME(load(filepath:));
@end
NS_ASSUME_NONNULL_END
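A minimal configuration sketch for the ANN_MLP wrapper above (illustrative; the Mat wrapper and CvType constants are assumed from elsewhere in these bindings):
Mat *layerSizes = [Mat zeros:3 cols:1 type:[CvType CV_32SC1]];   // fill with {input, hidden, output} neuron counts before training
ANN_MLP *mlp = [ANN_MLP create];
[mlp setLayerSizes:layerSizes];
[mlp setActivationFunction:ANN_MLP_SIGMOID_SYM param1:1 param2:1];
[mlp setTrainMethod:ANN_MLP_BACKPROP param1:0.1 param2:0.1];
// Training then goes through StatModel::train, optionally with ANN_MLP::TrainFlags.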

View File

@@ -0,0 +1,103 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/edge_filter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class AdaptiveManifoldFilter
/**
* Interface for Adaptive Manifold Filter realizations.
*
* For more details about this filter see CITE: Gastal12 and References.
*
* Listed below are the optional parameters that may be set with the Algorithm::set function.
* - member double sigma_s = 16.0
* Spatial standard deviation.
* - member double sigma_r = 0.2
* Color space standard deviation.
* - member int tree_height = -1
* Height of the manifold tree (default = -1 : automatically computed).
* - member int num_pca_iterations = 1
* Number of iterations used to compute the eigenvector.
* - member bool adjust_outliers = false
* Specifies whether to adjust outliers using Eq. 9.
* - member bool use_RNG = true
* Specifies whether to use a random number generator to compute the eigenvector.
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface AdaptiveManifoldFilter : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::AdaptiveManifoldFilter> nativePtrAdaptiveManifoldFilter;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::AdaptiveManifoldFilter>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::AdaptiveManifoldFilter>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::AdaptiveManifoldFilter::filter(Mat src, Mat& dst, Mat joint = Mat())
//
/**
* Apply high-dimensional filtering using adaptive manifolds.
*
* @param src input image to filter, with any number of channels.
*
* @param dst output image.
*
* @param joint optional joint (also called guided) image with any number of channels.
*/
- (void)filter:(Mat*)src dst:(Mat*)dst joint:(Mat*)joint NS_SWIFT_NAME(filter(src:dst:joint:));
/**
* Apply high-dimensional filtering using adaptive manifolds.
*
* @param src input image to filter, with any number of channels.
*
* @param dst output image.
*
*/
- (void)filter:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(filter(src:dst:));
//
// void cv::ximgproc::AdaptiveManifoldFilter::collectGarbage()
//
- (void)collectGarbage NS_SWIFT_NAME(collectGarbage());
//
// static Ptr_AdaptiveManifoldFilter cv::ximgproc::AdaptiveManifoldFilter::create()
//
+ (AdaptiveManifoldFilter*)create NS_SWIFT_NAME(create());
@end
NS_ASSUME_NONNULL_END
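A minimal usage sketch for the AdaptiveManifoldFilter wrapper above (illustrative; the Mat wrapper and CvType constants are assumed from elsewhere in these bindings):
Mat *src = [Mat zeros:480 cols:640 type:[CvType CV_8UC3]];   // stand-in for a real color image
Mat *dst = [Mat new];
AdaptiveManifoldFilter *amf = [AdaptiveManifoldFilter create];
[amf filter:src dst:dst];      // shorter overload: no joint/guide image
[amf collectGarbage];          // release internal buffers once filtering is done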

View File

@@ -0,0 +1,108 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
@class Feature2D;
@class FloatVector;
NS_ASSUME_NONNULL_BEGIN
// C++: class AffineFeature
/**
* Class implementing a wrapper that makes detectors and extractors affine invariant,
* described as ASIFT in CITE: YM11.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface AffineFeature : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::AffineFeature> nativePtrAffineFeature;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::AffineFeature>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::AffineFeature>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_AffineFeature cv::AffineFeature::create(Ptr_Feature2D backend, int maxTilt = 5, int minTilt = 0, float tiltStep = 1.4142135623730951f, float rotateStepBase = 72)
//
/**
* @param backend The detector/extractor you want to use as backend.
* @param maxTilt The highest power index of tilt factor. 5 is used in the paper as tilt sampling range n.
* @param minTilt The lowest power index of tilt factor. 0 is used in the paper.
* @param tiltStep Tilt sampling step `$$\delta_t$$` in Algorithm 1 in the paper.
* @param rotateStepBase Rotation sampling step factor b in Algorithm 1 in the paper.
*/
+ (AffineFeature*)create:(Feature2D*)backend maxTilt:(int)maxTilt minTilt:(int)minTilt tiltStep:(float)tiltStep rotateStepBase:(float)rotateStepBase NS_SWIFT_NAME(create(backend:maxTilt:minTilt:tiltStep:rotateStepBase:));
/**
* @param backend The detector/extractor you want to use as backend.
* @param maxTilt The highest power index of tilt factor. 5 is used in the paper as tilt sampling range n.
* @param minTilt The lowest power index of tilt factor. 0 is used in the paper.
* @param tiltStep Tilt sampling step `$$\delta_t$$` in Algorithm 1 in the paper.
*/
+ (AffineFeature*)create:(Feature2D*)backend maxTilt:(int)maxTilt minTilt:(int)minTilt tiltStep:(float)tiltStep NS_SWIFT_NAME(create(backend:maxTilt:minTilt:tiltStep:));
/**
* @param backend The detector/extractor you want to use as backend.
* @param maxTilt The highest power index of tilt factor. 5 is used in the paper as tilt sampling range n.
* @param minTilt The lowest power index of tilt factor. 0 is used in the paper.
*/
+ (AffineFeature*)create:(Feature2D*)backend maxTilt:(int)maxTilt minTilt:(int)minTilt NS_SWIFT_NAME(create(backend:maxTilt:minTilt:));
/**
* @param backend The detector/extractor you want to use as backend.
* @param maxTilt The highest power index of tilt factor. 5 is used in the paper as tilt sampling range n.
*/
+ (AffineFeature*)create:(Feature2D*)backend maxTilt:(int)maxTilt NS_SWIFT_NAME(create(backend:maxTilt:));
/**
* @param backend The detector/extractor you want to use as backend.
*/
+ (AffineFeature*)create:(Feature2D*)backend NS_SWIFT_NAME(create(backend:));
//
// void cv::AffineFeature::setViewParams(vector_float tilts, vector_float rolls)
//
- (void)setViewParams:(FloatVector*)tilts rolls:(FloatVector*)rolls NS_SWIFT_NAME(setViewParams(tilts:rolls:));
//
// void cv::AffineFeature::getViewParams(vector_float tilts, vector_float rolls)
//
- (void)getViewParams:(FloatVector*)tilts rolls:(FloatVector*)rolls NS_SWIFT_NAME(getViewParams(tilts:rolls:));
//
// String cv::AffineFeature::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
@end
NS_ASSUME_NONNULL_END
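A minimal usage sketch for the AffineFeature wrapper above, using the AKAZE wrapper from earlier in this commit as the backend (illustrative; Mat, KeyPoint, Feature2D's detectAndCompute and the CvType constants are assumed from elsewhere in these bindings):
Mat *gray = [Mat zeros:480 cols:640 type:[CvType CV_8UC1]];   // stand-in for a real grayscale frame
Feature2D *backend = [AKAZE create];
AffineFeature *asift = [AffineFeature create:backend];         // defaults: maxTilt 5, minTilt 0
NSMutableArray<KeyPoint*> *keypoints = [NSMutableArray array];
Mat *descriptors = [Mat new];
[asift detectAndCompute:gray mask:[Mat new] keypoints:keypoints descriptors:descriptors];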

View File

@@ -0,0 +1,56 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/xfeatures2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class AffineFeature2D
/**
* Class implementing affine adaptation for key points.
*
* A REF: FeatureDetector and a REF: DescriptorExtractor are wrapped to augment the
* detected points with their affine invariant elliptic region and to compute
* the feature descriptors on the regions after warping them into circles.
*
* The interface is equivalent to REF: Feature2D, adding operations for
* REF: Elliptic_KeyPoint "Elliptic_KeyPoints" instead of REF: KeyPoint "KeyPoints".
*
* Member of `Xfeatures2d`
*/
CV_EXPORTS @interface AffineFeature2D : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::xfeatures2d::AffineFeature2D> nativePtrAffineFeature2D;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::xfeatures2d::AffineFeature2D>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::xfeatures2d::AffineFeature2D>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@@ -0,0 +1,118 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
// C++: enum AgastDetectorType (cv.AgastFeatureDetector.DetectorType)
typedef NS_ENUM(int, AgastDetectorType) {
AgastFeatureDetector_AGAST_5_8 NS_SWIFT_NAME(AGAST_5_8) = 0,
AgastFeatureDetector_AGAST_7_12d NS_SWIFT_NAME(AGAST_7_12d) = 1,
AgastFeatureDetector_AGAST_7_12s NS_SWIFT_NAME(AGAST_7_12s) = 2,
AgastFeatureDetector_OAST_9_16 NS_SWIFT_NAME(OAST_9_16) = 3
};
NS_ASSUME_NONNULL_BEGIN
// C++: class AgastFeatureDetector
/**
* Wrapping class for feature detection using the AGAST method.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface AgastFeatureDetector : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::AgastFeatureDetector> nativePtrAgastFeatureDetector;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::AgastFeatureDetector>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::AgastFeatureDetector>)nativePtr;
#endif
#pragma mark - Class Constants
@property (class, readonly) int THRESHOLD NS_SWIFT_NAME(THRESHOLD);
@property (class, readonly) int NONMAX_SUPPRESSION NS_SWIFT_NAME(NONMAX_SUPPRESSION);
#pragma mark - Methods
//
// static Ptr_AgastFeatureDetector cv::AgastFeatureDetector::create(int threshold = 10, bool nonmaxSuppression = true, AgastFeatureDetector_DetectorType type = AgastFeatureDetector::OAST_9_16)
//
+ (AgastFeatureDetector*)create:(int)threshold nonmaxSuppression:(BOOL)nonmaxSuppression type:(AgastDetectorType)type NS_SWIFT_NAME(create(threshold:nonmaxSuppression:type:));
+ (AgastFeatureDetector*)create:(int)threshold nonmaxSuppression:(BOOL)nonmaxSuppression NS_SWIFT_NAME(create(threshold:nonmaxSuppression:));
+ (AgastFeatureDetector*)create:(int)threshold NS_SWIFT_NAME(create(threshold:));
+ (AgastFeatureDetector*)create NS_SWIFT_NAME(create());
//
// void cv::AgastFeatureDetector::setThreshold(int threshold)
//
- (void)setThreshold:(int)threshold NS_SWIFT_NAME(setThreshold(threshold:));
//
// int cv::AgastFeatureDetector::getThreshold()
//
- (int)getThreshold NS_SWIFT_NAME(getThreshold());
//
// void cv::AgastFeatureDetector::setNonmaxSuppression(bool f)
//
- (void)setNonmaxSuppression:(BOOL)f NS_SWIFT_NAME(setNonmaxSuppression(f:));
//
// bool cv::AgastFeatureDetector::getNonmaxSuppression()
//
- (BOOL)getNonmaxSuppression NS_SWIFT_NAME(getNonmaxSuppression());
//
// void cv::AgastFeatureDetector::setType(AgastFeatureDetector_DetectorType type)
//
- (void)setType:(AgastDetectorType)type NS_SWIFT_NAME(setType(type:));
//
// AgastFeatureDetector_DetectorType cv::AgastFeatureDetector::getType()
//
- (AgastDetectorType)getType NS_SWIFT_NAME(getType());
//
// String cv::AgastFeatureDetector::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
@end
NS_ASSUME_NONNULL_END
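A minimal usage sketch for the AgastFeatureDetector wrapper above (illustrative only):
AgastFeatureDetector *agast = [AgastFeatureDetector create:20
                                          nonmaxSuppression:YES
                                                       type:AgastFeatureDetector_OAST_9_16];
[agast setThreshold:30];                                        // parameters can be adjusted after creation
NSLog(@"nonmax suppression: %d", [agast getNonmaxSuppression]);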

View File

@@ -0,0 +1,114 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class Algorithm
/**
* This is a base class for all more or less complex algorithms in OpenCV
*
* especially for classes of algorithms, for which there can be multiple implementations. The examples
* are stereo correspondence (for which there are algorithms like block matching, semi-global block
* matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians
* models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
* etc.).
*
* Here is an example of using SimpleBlobDetector in your application via the Algorithm interface:
* SNIPPET: snippets/core_various.cpp Algorithm
*
* Member of `Core`
*/
CV_EXPORTS @interface Algorithm : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::Algorithm> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::Algorithm>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::Algorithm>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::Algorithm::clear()
//
/**
* Clears the algorithm state
*/
- (void)clear NS_SWIFT_NAME(clear());
//
// void cv::Algorithm::write(FileStorage fs)
//
// Unknown type 'FileStorage' (I), skipping the function
//
// void cv::Algorithm::write(FileStorage fs, String name)
//
// Unknown type 'FileStorage' (I), skipping the function
//
// void cv::Algorithm::read(FileNode fn)
//
// Unknown type 'FileNode' (I), skipping the function
//
// bool cv::Algorithm::empty()
//
/**
* Returns true if the Algorithm is empty (e.g. in the very beginning or after an unsuccessful read).
*/
- (BOOL)empty NS_SWIFT_NAME(empty());
//
// void cv::Algorithm::save(String filename)
//
/**
* Saves the algorithm to a file.
* In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs).
*/
- (void)save:(NSString*)filename NS_SWIFT_NAME(save(filename:));
//
// String cv::Algorithm::getDefaultName()
//
/**
* Returns the algorithm string identifier.
* This string is used as top level xml/yml node tag when the object is saved to a file or string.
*/
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
@end
NS_ASSUME_NONNULL_END
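A minimal sketch of the base-class API, using the AKAZE wrapper from earlier in this commit as a concrete subclass (illustrative; the file name is only an example):
Algorithm *alg = [AKAZE create];
NSLog(@"%@", [alg getDefaultName]);      // top-level node tag used when the object is saved
if (![alg empty]) {
    [alg save:@"detector_params.yml"];   // requires the subclass to implement write(FileStorage&)
}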

View File

@@ -0,0 +1,64 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/photo.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class AlignExposures
/**
* The base class for algorithms that align images of the same scene with different exposures
*
* Member of `Photo`
*/
CV_EXPORTS @interface AlignExposures : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::AlignExposures> nativePtrAlignExposures;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::AlignExposures>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::AlignExposures>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::AlignExposures::process(vector_Mat src, vector_Mat dst, Mat times, Mat response)
//
/**
* Aligns images
*
* @param src vector of input images
* @param dst vector of aligned images
* @param times vector of exposure time values for each image
* @param response 256x1 matrix with the inverse camera response function for each pixel value; it should
* have the same number of channels as the images.
*/
- (void)process:(NSArray<Mat*>*)src dst:(NSArray<Mat*>*)dst times:(Mat*)times response:(Mat*)response NS_SWIFT_NAME(process(src:dst:times:response:));
@end
NS_ASSUME_NONNULL_END

View File

@@ -0,0 +1,150 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/photo.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "AlignExposures.h"
@class Mat;
@class Point2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class AlignMTB
/**
* This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median
* luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.
*
* It is invariant to exposure, so exposure values and camera response are not necessary.
*
* In this implementation new image regions are filled with zeros.
*
* For more information see CITE: GW03 .
*
* Member of `Photo`
*/
CV_EXPORTS @interface AlignMTB : AlignExposures
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::AlignMTB> nativePtrAlignMTB;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::AlignMTB>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::AlignMTB>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::AlignMTB::process(vector_Mat src, vector_Mat dst, Mat times, Mat response)
//
- (void)process:(NSArray<Mat*>*)src dst:(NSArray<Mat*>*)dst times:(Mat*)times response:(Mat*)response NS_SWIFT_NAME(process(src:dst:times:response:));
//
// void cv::AlignMTB::process(vector_Mat src, vector_Mat dst)
//
/**
* Short version of process that doesn't take extra arguments.
*
* @param src vector of input images
* @param dst vector of aligned images
*/
- (void)process:(NSArray<Mat*>*)src dst:(NSArray<Mat*>*)dst NS_SWIFT_NAME(process(src:dst:));
//
// Point cv::AlignMTB::calculateShift(Mat img0, Mat img1)
//
/**
* Calculates the shift between two images, i.e. how to shift the second image so that it
* corresponds to the first.
*
* @param img0 first image
* @param img1 second image
*/
- (Point2i*)calculateShift:(Mat*)img0 img1:(Mat*)img1 NS_SWIFT_NAME(calculateShift(img0:img1:));
//
// void cv::AlignMTB::shiftMat(Mat src, Mat& dst, Point shift)
//
/**
* Helper function that shifts a Mat, filling new regions with zeros.
*
* @param src input image
* @param dst result image
* @param shift shift value
*/
- (void)shiftMat:(Mat*)src dst:(Mat*)dst shift:(Point2i*)shift NS_SWIFT_NAME(shiftMat(src:dst:shift:));
//
// void cv::AlignMTB::computeBitmaps(Mat img, Mat& tb, Mat& eb)
//
/**
* Computes the median threshold and exclude bitmaps of a given image.
*
* @param img input image
* @param tb median threshold bitmap
* @param eb exclude bitmap
*/
- (void)computeBitmaps:(Mat*)img tb:(Mat*)tb eb:(Mat*)eb NS_SWIFT_NAME(computeBitmaps(img:tb:eb:));
//
// int cv::AlignMTB::getMaxBits()
//
- (int)getMaxBits NS_SWIFT_NAME(getMaxBits());
//
// void cv::AlignMTB::setMaxBits(int max_bits)
//
- (void)setMaxBits:(int)max_bits NS_SWIFT_NAME(setMaxBits(max_bits:));
//
// int cv::AlignMTB::getExcludeRange()
//
- (int)getExcludeRange NS_SWIFT_NAME(getExcludeRange());
//
// void cv::AlignMTB::setExcludeRange(int exclude_range)
//
- (void)setExcludeRange:(int)exclude_range NS_SWIFT_NAME(setExcludeRange(exclude_range:));
//
// bool cv::AlignMTB::getCut()
//
- (BOOL)getCut NS_SWIFT_NAME(getCut());
//
// void cv::AlignMTB::setCut(bool value)
//
- (void)setCut:(BOOL)value NS_SWIFT_NAME(setCut(value:));
@end
NS_ASSUME_NONNULL_END
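A minimal usage sketch for the AlignMTB wrapper above (illustrative; the Mat wrapper and CvType constants are assumed from elsewhere in these bindings, and the instance is assumed to come from a factory such as the Photo module's createAlignMTB, which is not part of this excerpt):
Mat *under = [Mat zeros:480 cols:640 type:[CvType CV_8UC3]];     // stand-ins for a real bracketed exposure stack
Mat *over = [Mat zeros:480 cols:640 type:[CvType CV_8UC3]];
AlignMTB *align = [Photo createAlignMTB];                         // assumed factory, see note above
NSArray<Mat*> *dst = @[[Mat new], [Mat new]];
[align process:@[under, over] dst:dst];                           // short form: no times/response needed
NSLog(@"shift: %@", [align calculateShift:under img1:over]);     // how to shift 'over' to match 'under'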

View File

@@ -0,0 +1,16 @@
//
// ArrayUtil.h
//
// Created by Giles Payne on 2020/02/09.
//
#pragma once
#import <Foundation/Foundation.h>
/**
* Utility function to create and populate an NSMutableArray with a specific size
* @param size Size of array to create
* @param val Value with which to initialize array elements
*/
NSMutableArray* createArrayWithSize(int size, NSObject* val);
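A minimal usage sketch for createArrayWithSize (illustrative only): pre-size a mutable array with a default value, then overwrite slots as needed.
NSMutableArray *flags = createArrayWithSize(4, @NO);
flags[2] = @YES;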

View File

@@ -0,0 +1,782 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/aruco.hpp"
#import "aruco/charuco.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Board;
@class CharucoBoard;
@class DetectorParameters;
@class Dictionary;
@class Int4;
@class Mat;
@class Size2i;
@class TermCriteria;
// C++: enum PatternPositionType (cv.aruco.PatternPositionType)
typedef NS_ENUM(int, PatternPositionType) {
ARUCO_CCW_CENTER = 0,
ARUCO_CW_TOP_LEFT_CORNER = 1
};
NS_ASSUME_NONNULL_BEGIN
// C++: class Aruco
/**
* The Aruco module
*
* Member classes: `EstimateParameters`
*
* Member enums: `PatternPositionType`
*/
CV_EXPORTS @interface Aruco : NSObject
#pragma mark - Methods
//
// void cv::aruco::detectMarkers(Mat image, Ptr_Dictionary dictionary, vector_Mat& corners, Mat& ids, Ptr_DetectorParameters parameters = makePtr<DetectorParameters>(), vector_Mat& rejectedImgPoints = vector_Mat())
//
/**
* detect markers
* @deprecated Use class ArucoDetector::detectMarkers
*/
+ (void)detectMarkers:(Mat*)image dictionary:(Dictionary*)dictionary corners:(NSMutableArray<Mat*>*)corners ids:(Mat*)ids parameters:(DetectorParameters*)parameters rejectedImgPoints:(NSMutableArray<Mat*>*)rejectedImgPoints NS_SWIFT_NAME(detectMarkers(image:dictionary:corners:ids:parameters:rejectedImgPoints:)) DEPRECATED_ATTRIBUTE;
/**
* detect markers
* @deprecated Use class ArucoDetector::detectMarkers
*/
+ (void)detectMarkers:(Mat*)image dictionary:(Dictionary*)dictionary corners:(NSMutableArray<Mat*>*)corners ids:(Mat*)ids parameters:(DetectorParameters*)parameters NS_SWIFT_NAME(detectMarkers(image:dictionary:corners:ids:parameters:)) DEPRECATED_ATTRIBUTE;
/**
* detect markers
* @deprecated Use class ArucoDetector::detectMarkers
*/
+ (void)detectMarkers:(Mat*)image dictionary:(Dictionary*)dictionary corners:(NSMutableArray<Mat*>*)corners ids:(Mat*)ids NS_SWIFT_NAME(detectMarkers(image:dictionary:corners:ids:)) DEPRECATED_ATTRIBUTE;
//
// void cv::aruco::refineDetectedMarkers(Mat image, Ptr_Board board, vector_Mat& detectedCorners, Mat& detectedIds, vector_Mat& rejectedCorners, Mat cameraMatrix = Mat(), Mat distCoeffs = Mat(), float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true, Mat& recoveredIdxs = Mat(), Ptr_DetectorParameters parameters = makePtr<DetectorParameters>())
//
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs minRepDistance:(float)minRepDistance errorCorrectionRate:(float)errorCorrectionRate checkAllOrders:(BOOL)checkAllOrders recoveredIdxs:(Mat*)recoveredIdxs parameters:(DetectorParameters*)parameters NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:minRepDistance:errorCorrectionRate:checkAllOrders:recoveredIdxs:parameters:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs minRepDistance:(float)minRepDistance errorCorrectionRate:(float)errorCorrectionRate checkAllOrders:(BOOL)checkAllOrders recoveredIdxs:(Mat*)recoveredIdxs NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:minRepDistance:errorCorrectionRate:checkAllOrders:recoveredIdxs:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs minRepDistance:(float)minRepDistance errorCorrectionRate:(float)errorCorrectionRate checkAllOrders:(BOOL)checkAllOrders NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:minRepDistance:errorCorrectionRate:checkAllOrders:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs minRepDistance:(float)minRepDistance errorCorrectionRate:(float)errorCorrectionRate NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:minRepDistance:errorCorrectionRate:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs minRepDistance:(float)minRepDistance NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:minRepDistance:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:)) DEPRECATED_ATTRIBUTE;
/**
* refine detected markers
* @deprecated Use class ArucoDetector::refineDetectedMarkers
*/
+ (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:)) DEPRECATED_ATTRIBUTE;
//
// void cv::aruco::drawPlanarBoard(Ptr_Board board, Size outSize, Mat& img, int marginSize, int borderBits)
//
/**
* draw planar board
* @deprecated Use Board::generateImage
*/
+ (void)drawPlanarBoard:(Board*)board outSize:(Size2i*)outSize img:(Mat*)img marginSize:(int)marginSize borderBits:(int)borderBits NS_SWIFT_NAME(drawPlanarBoard(board:outSize:img:marginSize:borderBits:)) DEPRECATED_ATTRIBUTE;
//
// void cv::aruco::getBoardObjectAndImagePoints(Ptr_Board board, vector_Mat detectedCorners, Mat detectedIds, Mat& objPoints, Mat& imgPoints)
//
/**
* get board object and image points
* @deprecated Use Board::matchImagePoints
*/
+ (void)getBoardObjectAndImagePoints:(Board*)board detectedCorners:(NSArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds objPoints:(Mat*)objPoints imgPoints:(Mat*)imgPoints NS_SWIFT_NAME(getBoardObjectAndImagePoints(board:detectedCorners:detectedIds:objPoints:imgPoints:)) DEPRECATED_ATTRIBUTE;
//
// int cv::aruco::estimatePoseBoard(vector_Mat corners, Mat ids, Ptr_Board board, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false)
//
/**
* @deprecated Use cv::solvePnP
*/
+ (int)estimatePoseBoard:(NSArray<Mat*>*)corners ids:(Mat*)ids board:(Board*)board cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(estimatePoseBoard(corners:ids:board:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:)) DEPRECATED_ATTRIBUTE;
/**
* @deprecated Use cv::solvePnP
*/
+ (int)estimatePoseBoard:(NSArray<Mat*>*)corners ids:(Mat*)ids board:(Board*)board cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(estimatePoseBoard(corners:ids:board:cameraMatrix:distCoeffs:rvec:tvec:)) DEPRECATED_ATTRIBUTE;
//
// bool cv::aruco::estimatePoseCharucoBoard(Mat charucoCorners, Mat charucoIds, Ptr_CharucoBoard board, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false)
//
/**
* Pose estimation for a ChArUco board given some of their corners
* @param charucoCorners vector of detected charuco corners
* @param charucoIds list of identifiers for each corner in charucoCorners
* @param board layout of ChArUco board.
* @param cameraMatrix input 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
* @param distCoeffs vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
* (see cv::Rodrigues).
* @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
* @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not.
*
* This function estimates a Charuco board pose from some detected corners.
* The function checks if the input corners are enough and valid to perform pose estimation.
* If pose estimation is valid, returns true, else returns false.
* @see `use cv::drawFrameAxes to get world coordinate system axis for object points`
*/
+ (BOOL)estimatePoseCharucoBoard:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds board:(CharucoBoard*)board cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec useExtrinsicGuess:(BOOL)useExtrinsicGuess NS_SWIFT_NAME(estimatePoseCharucoBoard(charucoCorners:charucoIds:board:cameraMatrix:distCoeffs:rvec:tvec:useExtrinsicGuess:));
/**
* Pose estimation for a ChArUco board given some of their corners
* @param charucoCorners vector of detected charuco corners
* @param charucoIds list of identifiers for each corner in charucoCorners
* @param board layout of ChArUco board.
* @param cameraMatrix input 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
* @param distCoeffs vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
* (see cv::Rodrigues).
* @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
*
* This function estimates a Charuco board pose from some detected corners.
* The function checks if the input corners are enough and valid to perform pose estimation.
* If pose estimation is valid, returns true, else returns false.
* @see `use cv::drawFrameAxes to get world coordinate system axis for object points`
*/
+ (BOOL)estimatePoseCharucoBoard:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds board:(CharucoBoard*)board cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvec:(Mat*)rvec tvec:(Mat*)tvec NS_SWIFT_NAME(estimatePoseCharucoBoard(charucoCorners:charucoIds:board:cameraMatrix:distCoeffs:rvec:tvec:));
//
// void cv::aruco::estimatePoseSingleMarkers(vector_Mat corners, float markerLength, Mat cameraMatrix, Mat distCoeffs, Mat& rvecs, Mat& tvecs, Mat& objPoints = Mat(), _hidden_ estimateParameters = cv::makePtr<cv::aruco::EstimateParameters>())
//
/**
* @deprecated Use cv::solvePnP
*/
+ (void)estimatePoseSingleMarkers:(NSArray<Mat*>*)corners markerLength:(float)markerLength cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(Mat*)rvecs tvecs:(Mat*)tvecs objPoints:(Mat*)objPoints NS_SWIFT_NAME(estimatePoseSingleMarkers(corners:markerLength:cameraMatrix:distCoeffs:rvecs:tvecs:objPoints:)) DEPRECATED_ATTRIBUTE;
/**
* @deprecated Use cv::solvePnP
*/
+ (void)estimatePoseSingleMarkers:(NSArray<Mat*>*)corners markerLength:(float)markerLength cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(Mat*)rvecs tvecs:(Mat*)tvecs NS_SWIFT_NAME(estimatePoseSingleMarkers(corners:markerLength:cameraMatrix:distCoeffs:rvecs:tvecs:)) DEPRECATED_ATTRIBUTE;
//
// bool cv::aruco::testCharucoCornersCollinear(Ptr_CharucoBoard board, Mat charucoIds)
//
/**
* @deprecated Use CharucoBoard::checkCharucoCornersCollinear
*/
+ (BOOL)testCharucoCornersCollinear:(CharucoBoard*)board charucoIds:(Mat*)charucoIds NS_SWIFT_NAME(testCharucoCornersCollinear(board:charucoIds:)) DEPRECATED_ATTRIBUTE;
//
// double cv::aruco::calibrateCameraAruco(vector_Mat corners, Mat ids, Mat counter, Ptr_Board board, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
/**
* Calibrate a camera using aruco markers
*
* @param corners vector of detected marker corners in all frames.
* The corners should have the same format returned by detectMarkers (see #detectMarkers).
* @param ids list of identifiers for each marker in corners
* @param counter number of markers in each frame so that corners and ids can be split
* @param board Marker Board layout
* @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
* @param cameraMatrix Output 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
* (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
* s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: `$$(R_1, T_1, \dotsc , R_M, T_M)$$` where M is number of pattern views,
* `$$R_i, T_i$$` are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
* @param flags Different flags for the calibration process (see #calibrateCamera for details).
* @param criteria Termination criteria for the iterative optimization algorithm.
*
* This function calibrates a camera using an Aruco Board. The function receives a list of
* detected markers from several views of the Board. The process is similar to the chessboard
* calibration in calibrateCamera(). The function returns the final re-projection error.
*/
+ (double)calibrateCameraArucoExtended:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:criteria:));
/**
* Calibrate a camera using aruco markers
*
* @param corners vector of detected marker corners in all frames.
* The corners should have the same format returned by detectMarkers (see #detectMarkers).
* @param ids list of identifiers for each marker in corners
* @param counter number of markers in each frame so that corners and ids can be split
* @param board Marker Board layout
* @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
* @param cameraMatrix Output 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: `$$(R_1, T_1, \dotsc , R_M, T_M)$$` where M is number of pattern views,
* `$$R_i, T_i$$` are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
 * @param flags Different flags for the calibration process (see #calibrateCamera for details).
*
* This function calibrates a camera using an Aruco Board. The function receives a list of
* detected markers from several views of the Board. The process is similar to the chessboard
* calibration in calibrateCamera(). The function returns the final re-projection error.
*/
+ (double)calibrateCameraArucoExtended:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:));
/**
* Calibrate a camera using aruco markers
*
* @param corners vector of detected marker corners in all frames.
* The corners should have the same format returned by detectMarkers (see #detectMarkers).
* @param ids list of identifiers for each marker in corners
* @param counter number of markers in each frame so that corners and ids can be split
* @param board Marker Board layout
* @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
* @param cameraMatrix Output 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: `$$(R_1, T_1, \dotsc , R_M, T_M)$$` where M is number of pattern views,
* `$$R_i, T_i$$` are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
*
* This function calibrates a camera using an Aruco Board. The function receives a list of
* detected markers from several views of the Board. The process is similar to the chessboard
* calibration in calibrateCamera(). The function returns the final re-projection error.
*/
+ (double)calibrateCameraArucoExtended:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:));
//
// double cv::aruco::calibrateCameraAruco(vector_Mat corners, Mat ids, Mat counter, Ptr_Board board, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs = vector_Mat(), vector_Mat& tvecs = vector_Mat(), int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
/**
*
* It's the same function as #calibrateCameraAruco but without calibration error estimation.
*/
+ (double)calibrateCameraAruco:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:criteria:));
/**
*
* It's the same function as #calibrateCameraAruco but without calibration error estimation.
*/
+ (double)calibrateCameraAruco:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:));
/**
*
* It's the same function as #calibrateCameraAruco but without calibration error estimation.
*/
+ (double)calibrateCameraAruco:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:));
/**
*
* It's the same function as #calibrateCameraAruco but without calibration error estimation.
*/
+ (double)calibrateCameraAruco:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:rvecs:));
/**
*
* It's the same function as #calibrateCameraAruco but without calibration error estimation.
*/
+ (double)calibrateCameraAruco:(NSArray<Mat*>*)corners ids:(Mat*)ids counter:(Mat*)counter board:(Board*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(calibrateCameraAruco(corners:ids:counter:board:imageSize:cameraMatrix:distCoeffs:));
//
// double cv::aruco::calibrateCameraCharuco(vector_Mat charucoCorners, vector_Mat charucoIds, Ptr_CharucoBoard board, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
/**
* Calibrate a camera using Charuco corners
*
* @param charucoCorners vector of detected charuco corners per frame
* @param charucoIds list of identifiers for each corner in charucoCorners per frame
* @param board Marker Board layout
* @param imageSize input image size
* @param cameraMatrix Output 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: `$$(R_1, T_1, \dotsc , R_M, T_M)$$` where M is number of pattern views,
* `$$R_i, T_i$$` are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
 * @param flags Different flags for the calibration process (see #calibrateCamera for details).
* @param criteria Termination criteria for the iterative optimization algorithm.
*
* This function calibrates a camera using a set of corners of a Charuco Board. The function
 * receives a list of detected corners and their identifiers from several views of the Board.
* The function returns the final re-projection error.
*/
+ (double)calibrateCameraCharucoExtended:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:criteria:));
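//
// Usage sketch (illustrative, not part of the generated interface): a Swift call of the plain
// overload declared further below, assuming the enclosing class is exposed as `Aruco` and that
// `charucoCornersPerFrame` ([Mat]), `charucoIdsPerFrame` ([Mat]) and `charucoBoard` (CharucoBoard)
// were gathered beforehand (placeholder names).
//
//     let cameraMatrix = Mat()
//     let distCoeffs = Mat()
//     let rvecs = NSMutableArray()
//     let tvecs = NSMutableArray()
//     let rms = Aruco.calibrateCameraCharuco(
//         charucoCorners: charucoCornersPerFrame, charucoIds: charucoIdsPerFrame,
//         board: charucoBoard, imageSize: Size2i(width: 1280, height: 720),
//         cameraMatrix: cameraMatrix, distCoeffs: distCoeffs,
//         rvecs: rvecs, tvecs: tvecs)
//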
/**
* Calibrate a camera using Charuco corners
*
* @param charucoCorners vector of detected charuco corners per frame
* @param charucoIds list of identifiers for each corner in charucoCorners per frame
* @param board Marker Board layout
* @param imageSize input image size
* @param cameraMatrix Output 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: `$$(R_1, T_1, \dotsc , R_M, T_M)$$` where M is number of pattern views,
* `$$R_i, T_i$$` are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
 * @param flags Different flags for the calibration process (see #calibrateCamera for details).
*
* This function calibrates a camera using a set of corners of a Charuco Board. The function
 * receives a list of detected corners and their identifiers from several views of the Board.
* The function returns the final re-projection error.
*/
+ (double)calibrateCameraCharucoExtended:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors flags:(int)flags NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:flags:));
/**
* Calibrate a camera using Charuco corners
*
* @param charucoCorners vector of detected charuco corners per frame
* @param charucoIds list of identifiers for each corner in charucoCorners per frame
* @param board Marker Board layout
* @param imageSize input image size
* @param cameraMatrix Output 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$` . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* `$$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)$$` If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: `$$(R_1, T_1, \dotsc , R_M, T_M)$$` where M is number of pattern views,
* `$$R_i, T_i$$` are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
*
* This function calibrates a camera using a set of corners of a Charuco Board. The function
 * receives a list of detected corners and their identifiers from several views of the Board.
* The function returns the final re-projection error.
*/
+ (double)calibrateCameraCharucoExtended:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs stdDeviationsIntrinsics:(Mat*)stdDeviationsIntrinsics stdDeviationsExtrinsics:(Mat*)stdDeviationsExtrinsics perViewErrors:(Mat*)perViewErrors NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:stdDeviationsIntrinsics:stdDeviationsExtrinsics:perViewErrors:));
//
// double cv::aruco::calibrateCameraCharuco(vector_Mat charucoCorners, vector_Mat charucoIds, Ptr_CharucoBoard board, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs = vector_Mat(), vector_Mat& tvecs = vector_Mat(), int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
//
/**
* It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*/
+ (double)calibrateCameraCharuco:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags criteria:(TermCriteria*)criteria NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:criteria:));
/**
* It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*/
+ (double)calibrateCameraCharuco:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs flags:(int)flags NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:flags:));
/**
* It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*/
+ (double)calibrateCameraCharuco:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs tvecs:(NSMutableArray<Mat*>*)tvecs NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:tvecs:));
/**
* It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*/
+ (double)calibrateCameraCharuco:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs rvecs:(NSMutableArray<Mat*>*)rvecs NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:rvecs:));
/**
* It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*/
+ (double)calibrateCameraCharuco:(NSArray<Mat*>*)charucoCorners charucoIds:(NSArray<Mat*>*)charucoIds board:(CharucoBoard*)board imageSize:(Size2i*)imageSize cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(calibrateCameraCharuco(charucoCorners:charucoIds:board:imageSize:cameraMatrix:distCoeffs:));
//
// int cv::aruco::interpolateCornersCharuco(vector_Mat markerCorners, Mat markerIds, Mat image, Ptr_CharucoBoard board, Mat& charucoCorners, Mat& charucoIds, Mat cameraMatrix = Mat(), Mat distCoeffs = Mat(), int minMarkers = 2)
//
/**
* Interpolate position of ChArUco board corners
 * @param markerCorners vector of already detected marker corners. For each marker, its four
 * corners are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param markerIds list of identifiers for each marker in corners
 * @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in corners and ids parameters.
* @param board layout of ChArUco board.
* @param charucoCorners interpolated chessboard corners
* @param charucoIds interpolated chessboard corners identifiers
* @param cameraMatrix optional 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
* @param distCoeffs optional vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param minMarkers number of adjacent markers that must be detected to return a charuco corner
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
 * the process is based on an approximated pose estimation; otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is
* also returned in charucoIds.
* The function returns the number of interpolated corners.
*
* @deprecated Use CharucoDetector::detectBoard
*/
+ (int)interpolateCornersCharuco:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds image:(Mat*)image board:(CharucoBoard*)board charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs minMarkers:(int)minMarkers NS_SWIFT_NAME(interpolateCornersCharuco(markerCorners:markerIds:image:board:charucoCorners:charucoIds:cameraMatrix:distCoeffs:minMarkers:)) DEPRECATED_ATTRIBUTE;
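//
// Usage sketch (illustrative; the function is deprecated here in favour of CharucoDetector::detectBoard):
// a Swift call of the shortest overload declared below, assuming the enclosing class is exposed as
// `Aruco` and that `markerCorners` ([Mat]), `markerIds` (Mat), `grayFrame` (Mat) and `charucoBoard`
// come from a previous detection step (placeholder names).
//
//     let charucoCorners = Mat()
//     let charucoIds = Mat()
//     let n = Aruco.interpolateCornersCharuco(
//         markerCorners: markerCorners, markerIds: markerIds,
//         image: grayFrame, board: charucoBoard,
//         charucoCorners: charucoCorners, charucoIds: charucoIds)
//     print("\(n) chessboard corners interpolated")
//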
/**
* Interpolate position of ChArUco board corners
 * @param markerCorners vector of already detected marker corners. For each marker, its four
 * corners are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param markerIds list of identifiers for each marker in corners
 * @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in corners and ids parameters.
* @param board layout of ChArUco board.
* @param charucoCorners interpolated chessboard corners
* @param charucoIds interpolated chessboard corners identifiers
* @param cameraMatrix optional 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
* @param distCoeffs optional vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
 * the process is based on an approximated pose estimation; otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is
* also returned in charucoIds.
* The function returns the number of interpolated corners.
*
* @deprecated Use CharucoDetector::detectBoard
*/
+ (int)interpolateCornersCharuco:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds image:(Mat*)image board:(CharucoBoard*)board charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(interpolateCornersCharuco(markerCorners:markerIds:image:board:charucoCorners:charucoIds:cameraMatrix:distCoeffs:)) DEPRECATED_ATTRIBUTE;
/**
* Interpolate position of ChArUco board corners
 * @param markerCorners vector of already detected marker corners. For each marker, its four
 * corners are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param markerIds list of identifiers for each marker in corners
 * @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in corners and ids parameters.
* @param board layout of ChArUco board.
* @param charucoCorners interpolated chessboard corners
* @param charucoIds interpolated chessboard corners identifiers
* @param cameraMatrix optional 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
 * the process is based on an approximated pose estimation; otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is
* also returned in charucoIds.
* The function returns the number of interpolated corners.
*
* @deprecated Use CharucoDetector::detectBoard
*/
+ (int)interpolateCornersCharuco:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds image:(Mat*)image board:(CharucoBoard*)board charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds cameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(interpolateCornersCharuco(markerCorners:markerIds:image:board:charucoCorners:charucoIds:cameraMatrix:)) DEPRECATED_ATTRIBUTE;
/**
* Interpolate position of ChArUco board corners
 * @param markerCorners vector of already detected marker corners. For each marker, its four
 * corners are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param markerIds list of identifiers for each marker in corners
 * @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in corners and ids parameters.
* @param board layout of ChArUco board.
* @param charucoCorners interpolated chessboard corners
* @param charucoIds interpolated chessboard corners identifiers
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
 * the process is based on an approximated pose estimation; otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is
* also returned in charucoIds.
* The function returns the number of interpolated corners.
*
* @deprecated Use CharucoDetector::detectBoard
*/
+ (int)interpolateCornersCharuco:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds image:(Mat*)image board:(CharucoBoard*)board charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds NS_SWIFT_NAME(interpolateCornersCharuco(markerCorners:markerIds:image:board:charucoCorners:charucoIds:)) DEPRECATED_ATTRIBUTE;
//
// void cv::aruco::detectCharucoDiamond(Mat image, vector_Mat markerCorners, Mat markerIds, float squareMarkerLengthRate, vector_Mat& diamondCorners, Mat& diamondIds, Mat cameraMatrix = Mat(), Mat distCoeffs = Mat(), Ptr_Dictionary dictionary = makePtr<Dictionary> (getPredefinedDictionary(PredefinedDictionaryType::DICT_4X4_50)))
//
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param markerCorners list of detected marker corners from detectMarkers function.
* @param markerIds list of marker ids in markerCorners.
* @param squareMarkerLengthRate rate between square and marker length:
* squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
 * is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
 * format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> >).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param cameraMatrix Optional camera calibration matrix.
* @param distCoeffs Optional camera distortion coefficients.
* @param dictionary dictionary of markers indicating the type of markers.
*
 * This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*
* @deprecated Use CharucoDetector::detectDiamonds
*/
+ (void)detectCharucoDiamond:(Mat*)image markerCorners:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds squareMarkerLengthRate:(float)squareMarkerLengthRate diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs dictionary:(Dictionary*)dictionary NS_SWIFT_NAME(detectCharucoDiamond(image:markerCorners:markerIds:squareMarkerLengthRate:diamondCorners:diamondIds:cameraMatrix:distCoeffs:dictionary:)) DEPRECATED_ATTRIBUTE;
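//
// Usage sketch (illustrative; deprecated here in favour of CharucoDetector::detectDiamonds): a Swift
// call of the shortest overload declared below, with placeholder inputs from a previous detectMarkers
// call and the board dimensions expressed as Float lengths (only their ratio matters).
//
//     let squareLength: Float = 40.0
//     let markerLength: Float = 24.0
//     let diamondCorners = NSMutableArray()
//     let diamondIds = Mat()
//     Aruco.detectCharucoDiamond(
//         image: grayFrame, markerCorners: markerCorners, markerIds: markerIds,
//         squareMarkerLengthRate: squareLength / markerLength,
//         diamondCorners: diamondCorners, diamondIds: diamondIds)
//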
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param markerCorners list of detected marker corners from detectMarkers function.
* @param markerIds list of marker ids in markerCorners.
* @param squareMarkerLengthRate rate between square and marker length:
* squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
 * is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
 * format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> >).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param cameraMatrix Optional camera calibration matrix.
* @param distCoeffs Optional camera distortion coefficients.
*
 * This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*
* @deprecated Use CharucoDetector::detectDiamonds
*/
+ (void)detectCharucoDiamond:(Mat*)image markerCorners:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds squareMarkerLengthRate:(float)squareMarkerLengthRate diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(detectCharucoDiamond(image:markerCorners:markerIds:squareMarkerLengthRate:diamondCorners:diamondIds:cameraMatrix:distCoeffs:)) DEPRECATED_ATTRIBUTE;
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param markerCorners list of detected marker corners from detectMarkers function.
* @param markerIds list of marker ids in markerCorners.
* @param squareMarkerLengthRate rate between square and marker length:
* squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
 * is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
 * format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> >).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param cameraMatrix Optional camera calibration matrix.
*
 * This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*
* @deprecated Use CharucoDetector::detectDiamonds
*/
+ (void)detectCharucoDiamond:(Mat*)image markerCorners:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds squareMarkerLengthRate:(float)squareMarkerLengthRate diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds cameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(detectCharucoDiamond(image:markerCorners:markerIds:squareMarkerLengthRate:diamondCorners:diamondIds:cameraMatrix:)) DEPRECATED_ATTRIBUTE;
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param markerCorners list of detected marker corners from detectMarkers function.
* @param markerIds list of marker ids in markerCorners.
* @param squareMarkerLengthRate rate between square and marker length:
* squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
 * is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
 * format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> >).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
*
 * This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*
* @deprecated Use CharucoDetector::detectDiamonds
*/
+ (void)detectCharucoDiamond:(Mat*)image markerCorners:(NSArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds squareMarkerLengthRate:(float)squareMarkerLengthRate diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds NS_SWIFT_NAME(detectCharucoDiamond(image:markerCorners:markerIds:squareMarkerLengthRate:diamondCorners:diamondIds:)) DEPRECATED_ATTRIBUTE;
//
// void cv::aruco::drawCharucoDiamond(Ptr_Dictionary dictionary, Vec4i ids, int squareLength, int markerLength, Mat& img, int marginSize = 0, int borderBits = 1)
//
/**
* Draw a ChArUco Diamond marker
*
* @param dictionary dictionary of markers indicating the type of markers.
* @param ids list of 4 ids for each ArUco marker in the ChArUco marker.
* @param squareLength size of the chessboard squares in pixels.
* @param markerLength size of the markers in pixels.
* @param img output image with the marker. The size of this image will be
 * 3*squareLength + 2*marginSize.
* @param marginSize minimum margins (in pixels) of the marker in the output image
* @param borderBits width of the marker borders.
*
 * This function returns the image of a ChArUco marker, ready to be printed.
*/
+ (void)drawCharucoDiamond:(Dictionary*)dictionary ids:(Int4*)ids squareLength:(int)squareLength markerLength:(int)markerLength img:(Mat*)img marginSize:(int)marginSize borderBits:(int)borderBits NS_SWIFT_NAME(drawCharucoDiamond(dictionary:ids:squareLength:markerLength:img:marginSize:borderBits:));
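//
// Usage sketch (illustrative, not part of the generated interface): drawing one diamond in Swift,
// assuming the enclosing class is exposed as `Aruco`, `dictionary` was obtained earlier (e.g. a
// predefined dictionary), and Int4 offers an element-wise initializer as in the Int4 header.
//
//     let diamondImg = Mat()
//     Aruco.drawCharucoDiamond(
//         dictionary: dictionary, ids: Int4(v0: 45, v1: 68, v2: 28, v3: 74),
//         squareLength: 200, markerLength: 120, img: diamondImg)
//     // with the default marginSize the output is (3*200) x (3*200) pixels, ready to be printed
//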
/**
* Draw a ChArUco Diamond marker
*
* @param dictionary dictionary of markers indicating the type of markers.
* @param ids list of 4 ids for each ArUco marker in the ChArUco marker.
* @param squareLength size of the chessboard squares in pixels.
* @param markerLength size of the markers in pixels.
* @param img output image with the marker. The size of this image will be
 * 3*squareLength + 2*marginSize.
* @param marginSize minimum margins (in pixels) of the marker in the output image
*
 * This function returns the image of a ChArUco marker, ready to be printed.
*/
+ (void)drawCharucoDiamond:(Dictionary*)dictionary ids:(Int4*)ids squareLength:(int)squareLength markerLength:(int)markerLength img:(Mat*)img marginSize:(int)marginSize NS_SWIFT_NAME(drawCharucoDiamond(dictionary:ids:squareLength:markerLength:img:marginSize:));
/**
* Draw a ChArUco Diamond marker
*
* @param dictionary dictionary of markers indicating the type of markers.
* @param ids list of 4 ids for each ArUco marker in the ChArUco marker.
* @param squareLength size of the chessboard squares in pixels.
* @param markerLength size of the markers in pixels.
* @param img output image with the marker. The size of this image will be
 * 3*squareLength + 2*marginSize.
*
 * This function returns the image of a ChArUco marker, ready to be printed.
*/
+ (void)drawCharucoDiamond:(Dictionary*)dictionary ids:(Int4*)ids squareLength:(int)squareLength markerLength:(int)markerLength img:(Mat*)img NS_SWIFT_NAME(drawCharucoDiamond(dictionary:ids:squareLength:markerLength:img:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,282 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/aruco_detector.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Board;
@class DetectorParameters;
@class Dictionary;
@class Mat;
@class RefineParameters;
NS_ASSUME_NONNULL_BEGIN
// C++: class ArucoDetector
/**
 * The main functionality of the ArucoDetector class is the detection of markers in an image with the detectMarkers() method.
*
* After detecting some markers in the image, you can try to find undetected markers from this dictionary with
* refineDetectedMarkers() method.
*
* @see DetectorParameters, RefineParameters
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface ArucoDetector : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::ArucoDetector> nativePtrArucoDetector;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::ArucoDetector>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::ArucoDetector>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::ArucoDetector::ArucoDetector( _hidden_ dictionary = getPredefinedDictionary(cv::aruco::DICT_4X4_50), DetectorParameters detectorParams = DetectorParameters(), RefineParameters refineParams = RefineParameters())
//
/**
* Basic ArucoDetector constructor
*
* @param dictionary indicates the type of markers that will be searched
* @param detectorParams marker detection parameters
* @param refineParams marker refine detection parameters
*/
- (instancetype)initWithDictionary:(DetectorParameters*)detectorParams refineParams:(RefineParameters*)refineParams;
/**
* Basic ArucoDetector constructor
*
* @param dictionary indicates the type of markers that will be searched
* @param detectorParams marker detection parameters
*/
- (instancetype)initWithDictionary:(DetectorParameters*)detectorParams;
/**
* Basic ArucoDetector constructor
*
* @param dictionary indicates the type of markers that will be searched
*/
- (instancetype)initWithDictionary;
//
// void cv::aruco::ArucoDetector::detectMarkers(Mat image, vector_Mat& corners, Mat& ids, vector_Mat& rejectedImgPoints = vector_Mat())
//
/**
* Basic marker detection
*
* @param image input image
* @param corners vector of detected marker corners. For each marker, its four corners
 * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,
 * the dimensions of this array are Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
 * The identifiers have the same order as the markers in the imgPoints array.
 * @param rejectedImgPoints contains the imgPoints of those squares whose inner code does not have a
 * correct codification. Useful for debugging purposes.
*
* Performs marker detection in the input image. Only markers included in the specific dictionary
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
 * NOTE: The function does not correct lens distortion or take it into account. It is recommended to undistort
 * the input image with the corresponding camera model if the camera parameters are known.
* @see `undistort`, `estimatePoseSingleMarkers`, `estimatePoseBoard`
*/
- (void)detectMarkers:(Mat*)image corners:(NSMutableArray<Mat*>*)corners ids:(Mat*)ids rejectedImgPoints:(NSMutableArray<Mat*>*)rejectedImgPoints NS_SWIFT_NAME(detectMarkers(image:corners:ids:rejectedImgPoints:));
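//
// Usage sketch (illustrative, not part of the generated interface): detecting markers in Swift,
// assuming `detector` was built with one of the constructors above and `grayFrame` is the input Mat
// (placeholder names).
//
//     let corners = NSMutableArray()
//     let ids = Mat()
//     detector.detectMarkers(image: grayFrame, corners: corners, ids: ids)
//     print("detected \(ids.rows()) markers")
//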
/**
* Basic marker detection
*
* @param image input image
* @param corners vector of detected marker corners. For each marker, its four corners
 * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,
 * the dimensions of this array are Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
 * The identifiers have the same order as the markers in the imgPoints array.
*
* Performs marker detection in the input image. Only markers included in the specific dictionary
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
 * NOTE: The function does not correct lens distortion or take it into account. It is recommended to undistort
 * the input image with the corresponding camera model if the camera parameters are known.
* @see `undistort`, `estimatePoseSingleMarkers`, `estimatePoseBoard`
*/
- (void)detectMarkers:(Mat*)image corners:(NSMutableArray<Mat*>*)corners ids:(Mat*)ids NS_SWIFT_NAME(detectMarkers(image:corners:ids:));
//
// void cv::aruco::ArucoDetector::refineDetectedMarkers(Mat image, Board board, vector_Mat& detectedCorners, Mat& detectedIds, vector_Mat& rejectedCorners, Mat cameraMatrix = Mat(), Mat distCoeffs = Mat(), Mat& recoveredIdxs = Mat())
//
/**
 * Refine markers that were not detected, based on the already detected markers and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
* @param distCoeffs optional vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
* @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the
* original rejectedCorners array.
*
 * This function tries to find markers that were not detected in the basic detectMarkers function.
 * First, based on the currently detected markers and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondence between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using projectPoint function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
*/
- (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs recoveredIdxs:(Mat*)recoveredIdxs NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:recoveredIdxs:));
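//
// Usage sketch (illustrative, not part of the generated interface): refining in Swift right after a
// detectMarkers call, using the shortest overload declared below; `gridBoard` and `rejected` are
// placeholders for the board layout and the rejected candidates collected during detection.
//
//     detector.refineDetectedMarkers(
//         image: grayFrame, board: gridBoard,
//         detectedCorners: corners, detectedIds: ids,
//         rejectedCorners: rejected)
//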
/**
 * Refine markers that were not detected, based on the already detected markers and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
* @param distCoeffs optional vector of distortion coefficients
* `$$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])$$` of 4, 5, 8 or 12 elements
*
 * This function tries to find markers that were not detected in the basic detectMarkers function.
 * First, based on the currently detected markers and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondence between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using projectPoint function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
*/
- (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix distCoeffs:(Mat*)distCoeffs NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:distCoeffs:));
/**
 * Refine markers that were not detected, based on the already detected markers and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* `$$\newcommand{\vecthreethree}[9]{ \begin{bmatrix} #1 & #2 & #3\\\\ #4 & #5 & #6\\\\ #7 & #8 & #9 \end{bmatrix} } A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}$$`
*
 * This function tries to find markers that were not detected in the basic detectMarkers function.
 * First, based on the currently detected markers and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondence between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using projectPoint function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
*/
- (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners cameraMatrix:(Mat*)cameraMatrix NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:cameraMatrix:));
/**
 * Refine markers that were not detected, based on the already detected markers and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
*
 * This function tries to find markers that were not detected in the basic detectMarkers function.
 * First, based on the currently detected markers and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondence between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using projectPoint function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
*/
- (void)refineDetectedMarkers:(Mat*)image board:(Board*)board detectedCorners:(NSMutableArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds rejectedCorners:(NSMutableArray<Mat*>*)rejectedCorners NS_SWIFT_NAME(refineDetectedMarkers(image:board:detectedCorners:detectedIds:rejectedCorners:));
//
// Dictionary cv::aruco::ArucoDetector::getDictionary()
//
- (Dictionary*)getDictionary NS_SWIFT_NAME(getDictionary());
//
// void cv::aruco::ArucoDetector::setDictionary(Dictionary dictionary)
//
- (void)setDictionary:(Dictionary*)dictionary NS_SWIFT_NAME(setDictionary(dictionary:));
//
// DetectorParameters cv::aruco::ArucoDetector::getDetectorParameters()
//
- (DetectorParameters*)getDetectorParameters NS_SWIFT_NAME(getDetectorParameters());
//
// void cv::aruco::ArucoDetector::setDetectorParameters(DetectorParameters detectorParameters)
//
- (void)setDetectorParameters:(DetectorParameters*)detectorParameters NS_SWIFT_NAME(setDetectorParameters(detectorParameters:));
//
// RefineParameters cv::aruco::ArucoDetector::getRefineParameters()
//
- (RefineParameters*)getRefineParameters NS_SWIFT_NAME(getRefineParameters());
//
// void cv::aruco::ArucoDetector::setRefineParameters(RefineParameters refineParameters)
//
- (void)setRefineParameters:(RefineParameters*)refineParameters NS_SWIFT_NAME(setRefineParameters(refineParameters:));
//
// void cv::aruco::ArucoDetector::write(FileStorage fs, String name)
//
// Unknown type 'FileStorage' (I), skipping the function
//
// void cv::aruco::ArucoDetector::read(FileNode fn)
//
// Unknown type 'FileNode' (I), skipping the function
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,59 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/img_hash.hpp"
#import "opencv2/img_hash/average_hash.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "ImgHashBase.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class AverageHash
/**
* Computes average hash value of the input image
*
 * This is a fast image hashing algorithm, but it only works on simple cases. For more details, please
* refer to CITE: lookslikeit
*
* Member of `Img_hash`
*/
CV_EXPORTS @interface AverageHash : ImgHashBase
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::img_hash::AverageHash> nativePtrAverageHash;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::img_hash::AverageHash>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::img_hash::AverageHash>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_AverageHash cv::img_hash::AverageHash::create()
//
+ (AverageHash*)create NS_SWIFT_NAME(create());
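//
// Usage sketch (illustrative, not part of the generated interface): hashing and comparing two images
// in Swift, assuming ImgHashBase exposes compute(inputArr:outputArr:) and compare(hashOne:hashTwo:)
// as in the img_hash module bindings; `imageA` and `imageB` are placeholder input Mats.
//
//     let hasher = AverageHash.create()
//     let hashA = Mat(), hashB = Mat()
//     hasher.compute(inputArr: imageA, outputArr: hashA)
//     hasher.compute(inputArr: imageB, outputArr: hashB)
//     let distance = hasher.compare(hashOne: hashA, hashTwo: hashB)   // smaller means more similar
//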
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,121 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/xfeatures2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
// C++: enum BeblidSize (cv.xfeatures2d.BEBLID.BeblidSize)
typedef NS_ENUM(int, BeblidSize) {
BEBLID_SIZE_512_BITS NS_SWIFT_NAME(SIZE_512_BITS) = 100,
BEBLID_SIZE_256_BITS NS_SWIFT_NAME(SIZE_256_BITS) = 101
};
NS_ASSUME_NONNULL_BEGIN
// C++: class BEBLID
/**
* Class implementing BEBLID (Boosted Efficient Binary Local Image Descriptor),
* described in CITE: Suarez2020BEBLID .
*
 * BEBLID \cite Suarez2020BEBLID is an efficient binary descriptor learned with boosting.
 * It is able to describe keypoints from any detector just by changing the scale_factor parameter.
 * In several benchmarks it has been shown to largely outperform other binary descriptors such as ORB or
 * BRISK at the same efficiency. BEBLID describes a keypoint using differences of mean gray values in
 * different regions of the image around the KeyPoint; the descriptor is specifically optimized for
 * image matching and patch retrieval, addressing the asymmetries of these problems.
*
* If you find this code useful, please add a reference to the following paper:
* <BLOCKQUOTE> Iago Suárez, Ghesn Sfeir, José M. Buenaposada, and Luis Baumela.
* BEBLID: Boosted efficient binary local image descriptor.
 * Pattern Recognition Letters, 133:366-372, 2020. </BLOCKQUOTE>
*
 * The descriptor was trained using 1 million randomly sampled pairs of patches
* (20% positives and 80% negatives) from the Liberty split of the UBC datasets
* \cite winder2007learning as described in the paper CITE: Suarez2020BEBLID.
* You can check in the [AKAZE example](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
* how well BEBLID works. Detecting 10000 keypoints with ORB and describing with BEBLID obtains
* 561 inliers (75%) whereas describing with ORB obtains only 493 inliers (63%).
*
* Member of `Xfeatures2d`
*/
CV_EXPORTS @interface BEBLID : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::xfeatures2d::BEBLID> nativePtrBEBLID;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::xfeatures2d::BEBLID>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::xfeatures2d::BEBLID>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_BEBLID cv::xfeatures2d::BEBLID::create(float scale_factor, int n_bits = BEBLID::SIZE_512_BITS)
//
/**
* Creates the BEBLID descriptor.
 * @param scale_factor Adjusts the sampling window around detected keypoints:
 * - <b> 1.00f </b> should be the scale for ORB keypoints
 * - <b> 6.75f </b> should be the scale for SIFT detected keypoints
 * - <b> 6.25f </b> is the default and fits KAZE and SURF detected keypoints
 * - <b> 5.00f </b> should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints
 * @param n_bits Determines the number of bits in the descriptor. Should be either
 * BEBLID::SIZE_512_BITS or BEBLID::SIZE_256_BITS.
*/
+ (BEBLID*)create:(float)scale_factor n_bits:(int)n_bits NS_SWIFT_NAME(create(scale_factor:n_bits:));
/**
* Creates the BEBLID descriptor.
 * @param scale_factor Adjusts the sampling window around detected keypoints:
 * - <b> 1.00f </b> should be the scale for ORB keypoints
 * - <b> 6.75f </b> should be the scale for SIFT detected keypoints
 * - <b> 6.25f </b> is the default and fits KAZE and SURF detected keypoints
 * - <b> 5.00f </b> should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints
 *
 * n_bits defaults to BEBLID::SIZE_512_BITS.
*/
+ (BEBLID*)create:(float)scale_factor NS_SWIFT_NAME(create(scale_factor:));
//
// void cv::xfeatures2d::BEBLID::setScaleFactor(float scale_factor)
//
- (void)setScaleFactor:(float)scale_factor NS_SWIFT_NAME(setScaleFactor(scale_factor:));
//
// float cv::xfeatures2d::BEBLID::getScaleFactor()
//
- (float)getScaleFactor NS_SWIFT_NAME(getScaleFactor());
//
// String cv::xfeatures2d::BEBLID::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
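// A minimal usage sketch (illustrative only, not part of the generated API): pairing BEBLID with
// ORB-style keypoints, which per the table above use scale_factor 1.00f. The detector producing
// the keypoints is assumed to exist elsewhere in the project.
//
//   BEBLID *beblid = [BEBLID create:1.00f n_bits:BEBLID_SIZE_512_BITS];
//   // beblid can then be used wherever a Feature2D descriptor extractor is expected,
//   // e.g. through the describe/compute methods inherited from Feature2D.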
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,124 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "DescriptorMatcher.h"
#import "Core.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BFMatcher
/**
* Brute-force descriptor matcher.
*
* For each descriptor in the first set, this matcher finds the closest descriptor in the second set
* by trying each one. This descriptor matcher supports masking permissible matches of descriptor
* sets.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface BFMatcher : DescriptorMatcher
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BFMatcher> nativePtrBFMatcher;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BFMatcher>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BFMatcher>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::BFMatcher::BFMatcher(NormTypes normType = NORM_L2, bool crossCheck = false)
//
/**
* Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
*
*
*/
- (instancetype)initWithNormType:(NormTypes)normType crossCheck:(BOOL)crossCheck;
/**
* Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
*
*
*/
- (instancetype)initWithNormType:(NormTypes)normType;
/**
* Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
*
*
*/
- (instancetype)init;
//
// static Ptr_BFMatcher cv::BFMatcher::create(NormTypes normType = NORM_L2, bool crossCheck = false)
//
/**
* Brute-force matcher create method.
 * @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. The L1 and L2 norms are
 * preferable choices for SIFT and SURF descriptors; NORM_HAMMING should be used with ORB, BRISK and
 * BRIEF, and NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see the ORB::ORB constructor
 * description).
 * @param crossCheck If it is false (the default BFMatcher behaviour), the matcher finds the k
 * nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
 * k=1 will only return pairs (i,j) such that for the i-th query descriptor the j-th descriptor in the
 * matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
 * pairs. This technique usually produces the best results with a minimal number of outliers when there
 * are enough matches. It is an alternative to the ratio test used by D. Lowe in the SIFT paper.
*/
+ (BFMatcher*)createBFMatcher:(NormTypes)normType crossCheck:(BOOL)crossCheck NS_SWIFT_NAME(create(normType:crossCheck:));
/**
* Brute-force matcher create method.
 * @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. The L1 and L2 norms are
 * preferable choices for SIFT and SURF descriptors; NORM_HAMMING should be used with ORB, BRISK and
 * BRIEF, and NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see the ORB::ORB constructor
 * description).
 *
 * crossCheck defaults to false (no cross-checking of matches).
*/
+ (BFMatcher*)createBFMatcher:(NormTypes)normType NS_SWIFT_NAME(create(normType:));
/**
* Brute-force matcher create method.
 *
 * normType defaults to NORM_L2 and crossCheck defaults to false.
*/
+ (BFMatcher*)createBFMatcher NS_SWIFT_NAME(create());
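// A minimal usage sketch (illustrative only): a Hamming-norm matcher with cross-checking enabled,
// the combination recommended above for binary descriptors such as ORB, BRISK or BRIEF.
// NORM_HAMMING is the NormTypes constant referenced in the documentation above.
//
//   BFMatcher *matcher = [BFMatcher createBFMatcher:NORM_HAMMING crossCheck:YES];
//   // The actual matching calls (match/knnMatch) are inherited from DescriptorMatcher,
//   // declared in DescriptorMatcher.h and not shown in this excerpt.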
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,104 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "face/bif.hpp"
#import "opencv2/face/bif.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BIF
/**
* Implementation of bio-inspired features (BIF) from the paper:
* Guo, Guodong, et al. "Human age estimation using bio-inspired features."
* Computer Vision and Pattern Recognition, 2009. CVPR 2009.
*
* Member of `Face`
*/
CV_EXPORTS @interface BIF : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::BIF> nativePtrBIF;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::BIF>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::BIF>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::face::BIF::getNumBands()
//
/**
* @return The number of filter bands used for computing BIF.
*/
- (int)getNumBands NS_SWIFT_NAME(getNumBands());
//
// int cv::face::BIF::getNumRotations()
//
/**
* @return The number of image rotations.
*/
- (int)getNumRotations NS_SWIFT_NAME(getNumRotations());
//
// void cv::face::BIF::compute(Mat image, Mat& features)
//
/**
 * Computes features from the input image.
* @param image Input image (CV_32FC1).
* @param features Feature vector (CV_32FC1).
*/
- (void)compute:(Mat*)image features:(Mat*)features NS_SWIFT_NAME(compute(image:features:));
//
// static Ptr_BIF cv::face::BIF::create(int num_bands = 8, int num_rotations = 12)
//
/**
* @param num_bands The number of filter bands (<=8) used for computing BIF.
* @param num_rotations The number of image rotations for computing BIF.
* @return Object for computing BIF.
*/
+ (BIF*)create:(int)num_bands num_rotations:(int)num_rotations NS_SWIFT_NAME(create(num_bands:num_rotations:));
/**
* @param num_bands The number of filter bands (<=8) used for computing BIF.
* @return Object for computing BIF.
*/
+ (BIF*)create:(int)num_bands NS_SWIFT_NAME(create(num_bands:));
/**
* @return Object for computing BIF.
*/
+ (BIF*)create NS_SWIFT_NAME(create());
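// A minimal usage sketch (illustrative only): computing bio-inspired features for one image.
// `image` is assumed to be a CV_32FC1 Mat prepared elsewhere; 8 bands and 12 rotations are the
// create() defaults documented above.
//
//   BIF *bif = [BIF create:8 num_rotations:12];
//   Mat *features = [Mat new];
//   [bif compute:image features:features];   // features receives a CV_32FC1 feature vector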
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,125 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class DescriptorMatcher;
@class Feature2D;
@class KeyPoint;
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BOWImgDescriptorExtractor
/**
* Class to compute an image descriptor using the *bag of visual words*.
*
* Such a computation consists of the following steps:
*
 * 1. Compute descriptors for a given image and its keypoint set.
 * 2. Find the nearest visual words from the vocabulary for each keypoint descriptor.
 * 3. Compute the bag-of-words image descriptor as a normalized histogram of vocabulary words
 * encountered in the image. The i-th bin of the histogram is the frequency of the i-th word of the
 * vocabulary in the given image.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface BOWImgDescriptorExtractor : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BOWImgDescriptorExtractor> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BOWImgDescriptorExtractor>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BOWImgDescriptorExtractor>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::BOWImgDescriptorExtractor::BOWImgDescriptorExtractor(Ptr_Feature2D dextractor, Ptr_DescriptorMatcher dmatcher)
//
/**
* The constructor.
*
* @param dextractor Descriptor extractor that is used to compute descriptors for an input image and
* its keypoints.
* @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary
* for each keypoint descriptor of the image.
*/
- (instancetype)initWithDextractor:(Feature2D*)dextractor dmatcher:(DescriptorMatcher*)dmatcher;
//
// void cv::BOWImgDescriptorExtractor::setVocabulary(Mat vocabulary)
//
/**
* Sets a visual vocabulary.
*
* @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the
* vocabulary is a visual word (cluster center).
*/
- (void)setVocabulary:(Mat*)vocabulary NS_SWIFT_NAME(setVocabulary(vocabulary:));
//
// Mat cv::BOWImgDescriptorExtractor::getVocabulary()
//
/**
* Returns the set vocabulary.
*/
- (Mat*)getVocabulary NS_SWIFT_NAME(getVocabulary());
//
// void cv::BOWImgDescriptorExtractor::compute2(Mat image, vector_KeyPoint keypoints, Mat& imgDescriptor)
//
/**
 * Computes an image descriptor using the set visual vocabulary.
 *
 * @param image Image, for which the descriptor is computed.
 * @param keypoints Keypoints detected in the input image.
 * @param imgDescriptor Computed output image descriptor.
*/
- (void)compute:(Mat*)image keypoints:(NSArray<KeyPoint*>*)keypoints imgDescriptor:(Mat*)imgDescriptor NS_SWIFT_NAME(compute2(image:keypoints:imgDescriptor:));
//
// int cv::BOWImgDescriptorExtractor::descriptorSize()
//
/**
* Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.
*/
- (int)descriptorSize NS_SWIFT_NAME(descriptorSize());
//
// int cv::BOWImgDescriptorExtractor::descriptorType()
//
/**
* Returns an image descriptor type.
*/
- (int)descriptorType NS_SWIFT_NAME(descriptorType());
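// A minimal usage sketch (illustrative only) of the three steps described above. `extractor` is
// assumed to be any Feature2D producing float descriptors, `vocabulary` a CV_32F Mat of visual
// words (e.g. the output of BOWKMeansTrainer's cluster method), and `image`/`keypoints` the input
// image and its detected keypoints.
//
//   BFMatcher *matcher = [BFMatcher createBFMatcher:NORM_L2];
//   BOWImgDescriptorExtractor *bow =
//       [[BOWImgDescriptorExtractor alloc] initWithDextractor:extractor dmatcher:matcher];
//   [bow setVocabulary:vocabulary];
//   Mat *imgDescriptor = [Mat new];
//   [bow compute:image keypoints:keypoints imgDescriptor:imgDescriptor];
//   // imgDescriptor now holds the normalized histogram over the vocabulary words.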
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,94 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BOWTrainer.h"
@class Mat;
@class TermCriteria;
NS_ASSUME_NONNULL_BEGIN
// C++: class BOWKMeansTrainer
/**
 * kmeans-based class to train a visual vocabulary using the *bag of visual words* approach.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface BOWKMeansTrainer : BOWTrainer
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BOWKMeansTrainer> nativePtrBOWKMeansTrainer;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BOWKMeansTrainer>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BOWKMeansTrainer>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::BOWKMeansTrainer::BOWKMeansTrainer(int clusterCount, TermCriteria termcrit = TermCriteria(), int attempts = 3, int flags = KMEANS_PP_CENTERS)
//
/**
* The constructor.
*
* @see `cv::kmeans`
*/
- (instancetype)initWithClusterCount:(int)clusterCount termcrit:(TermCriteria*)termcrit attempts:(int)attempts flags:(int)flags;
/**
* The constructor.
*
* @see `cv::kmeans`
*/
- (instancetype)initWithClusterCount:(int)clusterCount termcrit:(TermCriteria*)termcrit attempts:(int)attempts;
/**
* The constructor.
*
* @see `cv::kmeans`
*/
- (instancetype)initWithClusterCount:(int)clusterCount termcrit:(TermCriteria*)termcrit;
/**
* The constructor.
*
* @see `cv::kmeans`
*/
- (instancetype)initWithClusterCount:(int)clusterCount;
//
// Mat cv::BOWKMeansTrainer::cluster()
//
- (Mat*)cluster NS_SWIFT_NAME(cluster());
//
// Mat cv::BOWKMeansTrainer::cluster(Mat descriptors)
//
- (Mat*)cluster:(Mat*)descriptors NS_SWIFT_NAME(cluster(descriptors:));
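// A minimal usage sketch (illustrative only): building a 1000-word vocabulary from descriptor
// matrices collected beforehand. `descriptors1` and `descriptors2` are assumed to be CV_32F Mats
// with one descriptor per row.
//
//   BOWKMeansTrainer *trainer = [[BOWKMeansTrainer alloc] initWithClusterCount:1000];
//   [trainer add:descriptors1];
//   [trainer add:descriptors2];
//   Mat *vocabulary = [trainer cluster];   // rows of `vocabulary` are the cluster centers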
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,112 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BOWTrainer
/**
* Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.
*
* For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
 * Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface BOWTrainer : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BOWTrainer> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BOWTrainer>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BOWTrainer>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::BOWTrainer::add(Mat descriptors)
//
/**
* Adds descriptors to a training set.
*
* @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
* descriptor.
*
 * The training set is clustered using the cluster() method to construct the vocabulary.
*/
- (void)add:(Mat*)descriptors NS_SWIFT_NAME(add(descriptors:));
//
// vector_Mat cv::BOWTrainer::getDescriptors()
//
/**
* Returns a training set of descriptors.
*/
- (NSArray<Mat*>*)getDescriptors NS_SWIFT_NAME(getDescriptors());
//
// int cv::BOWTrainer::descriptorsCount()
//
/**
* Returns the count of all descriptors stored in the training set.
*/
- (int)descriptorsCount NS_SWIFT_NAME(descriptorsCount());
//
// void cv::BOWTrainer::clear()
//
- (void)clear NS_SWIFT_NAME(clear());
//
// Mat cv::BOWTrainer::cluster()
//
- (Mat*)cluster NS_SWIFT_NAME(cluster());
//
// Mat cv::BOWTrainer::cluster(Mat descriptors)
//
/**
* Clusters train descriptors.
*
* @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
* Descriptors are not added to the inner train descriptor set.
*
* The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
* variant of the method, train descriptors stored in the object are clustered. In the second variant,
* input descriptors are clustered.
*/
- (Mat*)cluster:(Mat*)descriptors NS_SWIFT_NAME(cluster(descriptors:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,267 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
@class FloatVector;
@class IntVector;
NS_ASSUME_NONNULL_BEGIN
// C++: class BRISK
/**
* Class implementing the BRISK keypoint detector and descriptor extractor, described in CITE: LCS11 .
*
* Member of `Features2d`
*/
CV_EXPORTS @interface BRISK : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BRISK> nativePtrBRISK;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BRISK>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BRISK>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_BRISK cv::BRISK::create(int thresh = 30, int octaves = 3, float patternScale = 1.0f)
//
/**
* The BRISK constructor
*
* @param thresh AGAST detection threshold score.
* @param octaves detection octaves. Use 0 to do single scale.
* @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a
* keypoint.
*/
+ (BRISK*)create:(int)thresh octaves:(int)octaves patternScale:(float)patternScale NS_SWIFT_NAME(create(thresh:octaves:patternScale:));
/**
* The BRISK constructor
*
* @param thresh AGAST detection threshold score.
* @param octaves detection octaves. Use 0 to do single scale.
 *
 * patternScale defaults to 1.0f.
*/
+ (BRISK*)create:(int)thresh octaves:(int)octaves NS_SWIFT_NAME(create(thresh:octaves:));
/**
* The BRISK constructor
*
* @param thresh AGAST detection threshold score.
 *
 * octaves defaults to 3 and patternScale to 1.0f.
*/
+ (BRISK*)create:(int)thresh NS_SWIFT_NAME(create(thresh:));
/**
* The BRISK constructor
*
 * Uses the default parameters: thresh = 30, octaves = 3, patternScale = 1.0f.
*/
+ (BRISK*)create NS_SWIFT_NAME(create());
//
// static Ptr_BRISK cv::BRISK::create(vector_float radiusList, vector_int numberList, float dMax = 5.85f, float dMin = 8.2f, vector_int indexChange = std::vector<int>())
//
/**
* The BRISK constructor for a custom pattern
*
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
* @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
* scale 1).
* @param dMin threshold for the long pairings used for orientation determination (in pixels for
* keypoint scale 1).
* @param indexChange index remapping of the bits.
*/
+ (BRISK*)create:(FloatVector*)radiusList numberList:(IntVector*)numberList dMax:(float)dMax dMin:(float)dMin indexChange:(IntVector*)indexChange NS_SWIFT_NAME(create(radiusList:numberList:dMax:dMin:indexChange:));
/**
* The BRISK constructor for a custom pattern
*
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
* @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
* scale 1).
* @param dMin threshold for the long pairings used for orientation determination (in pixels for
* keypoint scale 1).
*/
+ (BRISK*)create:(FloatVector*)radiusList numberList:(IntVector*)numberList dMax:(float)dMax dMin:(float)dMin NS_SWIFT_NAME(create(radiusList:numberList:dMax:dMin:));
/**
* The BRISK constructor for a custom pattern
*
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
 * @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
 * scale 1).
 *
 * dMin defaults to 8.2f.
*/
+ (BRISK*)create:(FloatVector*)radiusList numberList:(IntVector*)numberList dMax:(float)dMax NS_SWIFT_NAME(create(radiusList:numberList:dMax:));
/**
* The BRISK constructor for a custom pattern
*
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
 *
 * dMax defaults to 5.85f and dMin to 8.2f.
*/
+ (BRISK*)create:(FloatVector*)radiusList numberList:(IntVector*)numberList NS_SWIFT_NAME(create(radiusList:numberList:));
//
// static Ptr_BRISK cv::BRISK::create(int thresh, int octaves, vector_float radiusList, vector_int numberList, float dMax = 5.85f, float dMin = 8.2f, vector_int indexChange = std::vector<int>())
//
/**
* The BRISK constructor for a custom pattern, detection threshold and octaves
*
* @param thresh AGAST detection threshold score.
* @param octaves detection octaves. Use 0 to do single scale.
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
* @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
* scale 1).
* @param dMin threshold for the long pairings used for orientation determination (in pixels for
* keypoint scale 1).
* @param indexChange index remapping of the bits.
*/
+ (BRISK*)create:(int)thresh octaves:(int)octaves radiusList:(FloatVector*)radiusList numberList:(IntVector*)numberList dMax:(float)dMax dMin:(float)dMin indexChange:(IntVector*)indexChange NS_SWIFT_NAME(create(thresh:octaves:radiusList:numberList:dMax:dMin:indexChange:));
/**
* The BRISK constructor for a custom pattern, detection threshold and octaves
*
* @param thresh AGAST detection threshold score.
* @param octaves detection octaves. Use 0 to do single scale.
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
* @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
* scale 1).
* @param dMin threshold for the long pairings used for orientation determination (in pixels for
* keypoint scale 1).
*/
+ (BRISK*)create:(int)thresh octaves:(int)octaves radiusList:(FloatVector*)radiusList numberList:(IntVector*)numberList dMax:(float)dMax dMin:(float)dMin NS_SWIFT_NAME(create(thresh:octaves:radiusList:numberList:dMax:dMin:));
/**
* The BRISK constructor for a custom pattern, detection threshold and octaves
*
* @param thresh AGAST detection threshold score.
* @param octaves detection octaves. Use 0 to do single scale.
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
 * @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
 * scale 1).
 *
 * dMin defaults to 8.2f.
*/
+ (BRISK*)create:(int)thresh octaves:(int)octaves radiusList:(FloatVector*)radiusList numberList:(IntVector*)numberList dMax:(float)dMax NS_SWIFT_NAME(create(thresh:octaves:radiusList:numberList:dMax:));
/**
* The BRISK constructor for a custom pattern, detection threshold and octaves
*
* @param thresh AGAST detection threshold score.
* @param octaves detection octaves. Use 0 to do single scale.
* @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
* keypoint scale 1).
* @param numberList defines the number of sampling points on the sampling circle. Must be the same
 * size as radiusList.
 *
 * dMax defaults to 5.85f and dMin to 8.2f.
*/
+ (BRISK*)create:(int)thresh octaves:(int)octaves radiusList:(FloatVector*)radiusList numberList:(IntVector*)numberList NS_SWIFT_NAME(create(thresh:octaves:radiusList:numberList:));
//
// String cv::BRISK::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
//
// void cv::BRISK::setThreshold(int threshold)
//
/**
* Set detection threshold.
* @param threshold AGAST detection threshold score.
*/
- (void)setThreshold:(int)threshold NS_SWIFT_NAME(setThreshold(threshold:));
//
// int cv::BRISK::getThreshold()
//
- (int)getThreshold NS_SWIFT_NAME(getThreshold());
//
// void cv::BRISK::setOctaves(int octaves)
//
/**
* Set detection octaves.
* @param octaves detection octaves. Use 0 to do single scale.
*/
- (void)setOctaves:(int)octaves NS_SWIFT_NAME(setOctaves(octaves:));
//
// int cv::BRISK::getOctaves()
//
- (int)getOctaves NS_SWIFT_NAME(getOctaves());
//
// void cv::BRISK::setPatternScale(float patternScale)
//
/**
* Set detection patternScale.
* @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a
* keypoint.
*/
- (void)setPatternScale:(float)patternScale NS_SWIFT_NAME(setPatternScale(patternScale:));
//
// float cv::BRISK::getPatternScale()
//
- (float)getPatternScale NS_SWIFT_NAME(getPatternScale());
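// A minimal usage sketch (illustrative only): a BRISK instance with a stricter AGAST threshold and
// four octaves, then adjusted through the setters declared above.
//
//   BRISK *brisk = [BRISK create:60 octaves:4];
//   [brisk setPatternScale:1.2f];
//   // brisk can now be used as a Feature2D keypoint detector and descriptor extractor.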
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,94 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#import "opencv2/video/background_segm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractor
/**
 * Base class for background/foreground segmentation.
*
* The class is only used to define the common interface for the whole family of background/foreground
* segmentation algorithms.
*
* Member of `Video`
*/
CV_EXPORTS @interface BackgroundSubtractor : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BackgroundSubtractor> nativePtrBackgroundSubtractor;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BackgroundSubtractor>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BackgroundSubtractor>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::BackgroundSubtractor::apply(Mat image, Mat& fgmask, double learningRate = -1)
//
/**
* Computes a foreground mask.
*
* @param image Next video frame.
* @param fgmask The output foreground mask as an 8-bit binary image.
* @param learningRate The value between 0 and 1 that indicates how fast the background model is
 * learnt. A negative parameter value makes the algorithm use an automatically chosen learning
* rate. 0 means that the background model is not updated at all, 1 means that the background model
* is completely reinitialized from the last frame.
*/
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask learningRate:(double)learningRate NS_SWIFT_NAME(apply(image:fgmask:learningRate:));
/**
* Computes a foreground mask.
*
* @param image Next video frame.
* @param fgmask The output foreground mask as an 8-bit binary image.
 *
 * learningRate defaults to -1 (an automatically chosen learning rate).
*/
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask NS_SWIFT_NAME(apply(image:fgmask:));
//
// void cv::BackgroundSubtractor::getBackgroundImage(Mat& backgroundImage)
//
/**
* Computes a background image.
*
* @param backgroundImage The output background image.
*
 * NOTE: Sometimes the background image can be very blurry, as it contains the average background
* statistics.
*/
- (void)getBackgroundImage:(Mat*)backgroundImage NS_SWIFT_NAME(getBackgroundImage(backgroundImage:));
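// A minimal usage sketch (illustrative only). BackgroundSubtractor itself is abstract; `subtractor`
// is assumed to be a concrete instance obtained from one of the factory functions referenced in the
// docs of its subclasses (e.g. createBackgroundSubtractorKNN or createBackgroundSubtractorMOG2,
// declared in other headers of this framework). `frame` is the next video frame as a Mat.
//
//   Mat *fgmask = [Mat new];
//   [subtractor apply:frame fgmask:fgmask];                    // automatic learning rate (-1)
//   [subtractor apply:frame fgmask:fgmask learningRate:0.005]; // explicit, slowly adapting model
//   Mat *background = [Mat new];
//   [subtractor getBackgroundImage:background];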
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,140 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorCNT
/**
* Background subtraction based on counting.
*
 * About as fast as MOG2 on a high-end system.
 * More than twice as fast as MOG2 on cheap hardware (benchmarked on a Raspberry Pi 3).
 *
 * Algorithm by Sagi Zeevi ( https://github.com/sagi-z/BackgroundSubtractorCNT )
*
* Member of `Bgsegm`
*/
CV_EXPORTS @interface BackgroundSubtractorCNT : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bgsegm::BackgroundSubtractorCNT> nativePtrBackgroundSubtractorCNT;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bgsegm::BackgroundSubtractorCNT>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bgsegm::BackgroundSubtractorCNT>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::bgsegm::BackgroundSubtractorCNT::apply(Mat image, Mat& fgmask, double learningRate = -1)
//
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask learningRate:(double)learningRate NS_SWIFT_NAME(apply(image:fgmask:learningRate:));
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask NS_SWIFT_NAME(apply(image:fgmask:));
//
// void cv::bgsegm::BackgroundSubtractorCNT::getBackgroundImage(Mat& backgroundImage)
//
- (void)getBackgroundImage:(Mat*)backgroundImage NS_SWIFT_NAME(getBackgroundImage(backgroundImage:));
//
// int cv::bgsegm::BackgroundSubtractorCNT::getMinPixelStability()
//
/**
* Returns number of frames with same pixel color to consider stable.
*/
- (int)getMinPixelStability NS_SWIFT_NAME(getMinPixelStability());
//
// void cv::bgsegm::BackgroundSubtractorCNT::setMinPixelStability(int value)
//
/**
* Sets the number of frames with same pixel color to consider stable.
*/
- (void)setMinPixelStability:(int)value NS_SWIFT_NAME(setMinPixelStability(value:));
//
// int cv::bgsegm::BackgroundSubtractorCNT::getMaxPixelStability()
//
/**
* Returns maximum allowed credit for a pixel in history.
*/
- (int)getMaxPixelStability NS_SWIFT_NAME(getMaxPixelStability());
//
// void cv::bgsegm::BackgroundSubtractorCNT::setMaxPixelStability(int value)
//
/**
* Sets the maximum allowed credit for a pixel in history.
*/
- (void)setMaxPixelStability:(int)value NS_SWIFT_NAME(setMaxPixelStability(value:));
//
// bool cv::bgsegm::BackgroundSubtractorCNT::getUseHistory()
//
/**
* Returns if we're giving a pixel credit for being stable for a long time.
*/
- (BOOL)getUseHistory NS_SWIFT_NAME(getUseHistory());
//
// void cv::bgsegm::BackgroundSubtractorCNT::setUseHistory(bool value)
//
/**
* Sets if we're giving a pixel credit for being stable for a long time.
*/
- (void)setUseHistory:(BOOL)value NS_SWIFT_NAME(setUseHistory(value:));
//
// bool cv::bgsegm::BackgroundSubtractorCNT::getIsParallel()
//
/**
* Returns if we're parallelizing the algorithm.
*/
- (BOOL)getIsParallel NS_SWIFT_NAME(getIsParallel());
//
// void cv::bgsegm::BackgroundSubtractorCNT::setIsParallel(bool value)
//
/**
* Sets if we're parallelizing the algorithm.
*/
- (void)setIsParallel:(BOOL)value NS_SWIFT_NAME(setIsParallel(value:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,242 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorGMG
/**
* Background Subtractor module based on the algorithm given in CITE: Gold2012 .
*
* Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*
* Member of `Bgsegm`
*/
CV_EXPORTS @interface BackgroundSubtractorGMG : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bgsegm::BackgroundSubtractorGMG> nativePtrBackgroundSubtractorGMG;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bgsegm::BackgroundSubtractorGMG>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bgsegm::BackgroundSubtractorGMG>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::bgsegm::BackgroundSubtractorGMG::getMaxFeatures()
//
/**
* Returns total number of distinct colors to maintain in histogram.
*/
- (int)getMaxFeatures NS_SWIFT_NAME(getMaxFeatures());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setMaxFeatures(int maxFeatures)
//
/**
* Sets total number of distinct colors to maintain in histogram.
*/
- (void)setMaxFeatures:(int)maxFeatures NS_SWIFT_NAME(setMaxFeatures(maxFeatures:));
//
// double cv::bgsegm::BackgroundSubtractorGMG::getDefaultLearningRate()
//
/**
* Returns the learning rate of the algorithm.
*
* It lies between 0.0 and 1.0. It determines how quickly features are "forgotten" from
* histograms.
*/
- (double)getDefaultLearningRate NS_SWIFT_NAME(getDefaultLearningRate());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setDefaultLearningRate(double lr)
//
/**
* Sets the learning rate of the algorithm.
*/
- (void)setDefaultLearningRate:(double)lr NS_SWIFT_NAME(setDefaultLearningRate(lr:));
//
// int cv::bgsegm::BackgroundSubtractorGMG::getNumFrames()
//
/**
* Returns the number of frames used to initialize background model.
*/
- (int)getNumFrames NS_SWIFT_NAME(getNumFrames());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setNumFrames(int nframes)
//
/**
* Sets the number of frames used to initialize background model.
*/
- (void)setNumFrames:(int)nframes NS_SWIFT_NAME(setNumFrames(nframes:));
//
// int cv::bgsegm::BackgroundSubtractorGMG::getQuantizationLevels()
//
/**
* Returns the parameter used for quantization of color-space.
*
* It is the number of discrete levels in each channel to be used in histograms.
*/
- (int)getQuantizationLevels NS_SWIFT_NAME(getQuantizationLevels());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setQuantizationLevels(int nlevels)
//
/**
* Sets the parameter used for quantization of color-space
*/
- (void)setQuantizationLevels:(int)nlevels NS_SWIFT_NAME(setQuantizationLevels(nlevels:));
//
// double cv::bgsegm::BackgroundSubtractorGMG::getBackgroundPrior()
//
/**
* Returns the prior probability that each individual pixel is a background pixel.
*/
- (double)getBackgroundPrior NS_SWIFT_NAME(getBackgroundPrior());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setBackgroundPrior(double bgprior)
//
/**
* Sets the prior probability that each individual pixel is a background pixel.
*/
- (void)setBackgroundPrior:(double)bgprior NS_SWIFT_NAME(setBackgroundPrior(bgprior:));
//
// int cv::bgsegm::BackgroundSubtractorGMG::getSmoothingRadius()
//
/**
* Returns the kernel radius used for morphological operations
*/
- (int)getSmoothingRadius NS_SWIFT_NAME(getSmoothingRadius());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setSmoothingRadius(int radius)
//
/**
* Sets the kernel radius used for morphological operations
*/
- (void)setSmoothingRadius:(int)radius NS_SWIFT_NAME(setSmoothingRadius(radius:));
//
// double cv::bgsegm::BackgroundSubtractorGMG::getDecisionThreshold()
//
/**
* Returns the value of decision threshold.
*
 * Decision value is the value above which a pixel is determined to be FG.
*/
- (double)getDecisionThreshold NS_SWIFT_NAME(getDecisionThreshold());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setDecisionThreshold(double thresh)
//
/**
* Sets the value of decision threshold.
*/
- (void)setDecisionThreshold:(double)thresh NS_SWIFT_NAME(setDecisionThreshold(thresh:));
//
// bool cv::bgsegm::BackgroundSubtractorGMG::getUpdateBackgroundModel()
//
/**
* Returns the status of background model update
*/
- (BOOL)getUpdateBackgroundModel NS_SWIFT_NAME(getUpdateBackgroundModel());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setUpdateBackgroundModel(bool update)
//
/**
* Sets the status of background model update
*/
- (void)setUpdateBackgroundModel:(BOOL)update NS_SWIFT_NAME(setUpdateBackgroundModel(update:));
//
// double cv::bgsegm::BackgroundSubtractorGMG::getMinVal()
//
/**
* Returns the minimum value taken on by pixels in image sequence. Usually 0.
*/
- (double)getMinVal NS_SWIFT_NAME(getMinVal());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setMinVal(double val)
//
/**
* Sets the minimum value taken on by pixels in image sequence.
*/
- (void)setMinVal:(double)val NS_SWIFT_NAME(setMinVal(val:));
//
// double cv::bgsegm::BackgroundSubtractorGMG::getMaxVal()
//
/**
* Returns the maximum value taken on by pixels in image sequence. e.g. 1.0 or 255.
*/
- (double)getMaxVal NS_SWIFT_NAME(getMaxVal());
//
// void cv::bgsegm::BackgroundSubtractorGMG::setMaxVal(double val)
//
/**
* Sets the maximum value taken on by pixels in image sequence.
*/
- (void)setMaxVal:(double)val NS_SWIFT_NAME(setMaxVal(val:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,65 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorGSOC
/**
 * Implementation of a different, improved algorithm called GSOC, named after the Google Summer of Code during which it was implemented; it does not originate from any paper.
 *
 * This algorithm demonstrates better performance on the CDNET 2014 dataset compared to other algorithms in OpenCV.
*
* Member of `Bgsegm`
*/
CV_EXPORTS @interface BackgroundSubtractorGSOC : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bgsegm::BackgroundSubtractorGSOC> nativePtrBackgroundSubtractorGSOC;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bgsegm::BackgroundSubtractorGSOC>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bgsegm::BackgroundSubtractorGSOC>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::bgsegm::BackgroundSubtractorGSOC::apply(Mat image, Mat& fgmask, double learningRate = -1)
//
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask learningRate:(double)learningRate NS_SWIFT_NAME(apply(image:fgmask:learningRate:));
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask NS_SWIFT_NAME(apply(image:fgmask:));
//
// void cv::bgsegm::BackgroundSubtractorGSOC::getBackgroundImage(Mat& backgroundImage)
//
- (void)getBackgroundImage:(Mat*)backgroundImage NS_SWIFT_NAME(getBackgroundImage(backgroundImage:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,198 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#import "opencv2/video/background_segm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorKNN
/**
* K-nearest neighbours - based Background/Foreground Segmentation Algorithm.
*
* The class implements the K-nearest neighbours background subtraction described in CITE: Zivkovic2006 .
 * Very efficient if the number of foreground pixels is low.
*
* Member of `Video`
*/
CV_EXPORTS @interface BackgroundSubtractorKNN : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BackgroundSubtractorKNN> nativePtrBackgroundSubtractorKNN;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BackgroundSubtractorKNN>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BackgroundSubtractorKNN>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::BackgroundSubtractorKNN::getHistory()
//
/**
* Returns the number of last frames that affect the background model
*/
- (int)getHistory NS_SWIFT_NAME(getHistory());
//
// void cv::BackgroundSubtractorKNN::setHistory(int history)
//
/**
* Sets the number of last frames that affect the background model
*/
- (void)setHistory:(int)history NS_SWIFT_NAME(setHistory(history:));
//
// int cv::BackgroundSubtractorKNN::getNSamples()
//
/**
* Returns the number of data samples in the background model
*/
- (int)getNSamples NS_SWIFT_NAME(getNSamples());
//
// void cv::BackgroundSubtractorKNN::setNSamples(int _nN)
//
/**
* Sets the number of data samples in the background model.
*
 * The model needs to be reinitialized to reserve memory.
*/
- (void)setNSamples:(int)_nN NS_SWIFT_NAME(setNSamples(_nN:));
//
// double cv::BackgroundSubtractorKNN::getDist2Threshold()
//
/**
* Returns the threshold on the squared distance between the pixel and the sample
*
* The threshold on the squared distance between the pixel and the sample to decide whether a pixel is
* close to a data sample.
*/
- (double)getDist2Threshold NS_SWIFT_NAME(getDist2Threshold());
//
// void cv::BackgroundSubtractorKNN::setDist2Threshold(double _dist2Threshold)
//
/**
* Sets the threshold on the squared distance
*/
- (void)setDist2Threshold:(double)_dist2Threshold NS_SWIFT_NAME(setDist2Threshold(_dist2Threshold:));
//
// int cv::BackgroundSubtractorKNN::getkNNSamples()
//
/**
* Returns the number of neighbours, the k in the kNN.
*
 * K is the number of samples that need to be within dist2Threshold in order to decide that the
 * pixel matches the kNN background model.
*/
- (int)getkNNSamples NS_SWIFT_NAME(getkNNSamples());
//
// void cv::BackgroundSubtractorKNN::setkNNSamples(int _nkNN)
//
/**
* Sets the k in the kNN. How many nearest neighbours need to match.
*/
- (void)setkNNSamples:(int)_nkNN NS_SWIFT_NAME(setkNNSamples(_nkNN:));
//
// bool cv::BackgroundSubtractorKNN::getDetectShadows()
//
/**
* Returns the shadow detection flag
*
* If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorKNN for
* details.
*/
- (BOOL)getDetectShadows NS_SWIFT_NAME(getDetectShadows());
//
// void cv::BackgroundSubtractorKNN::setDetectShadows(bool detectShadows)
//
/**
* Enables or disables shadow detection
*/
- (void)setDetectShadows:(BOOL)detectShadows NS_SWIFT_NAME(setDetectShadows(detectShadows:));
//
// int cv::BackgroundSubtractorKNN::getShadowValue()
//
/**
* Returns the shadow value
*
* Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0
* in the mask always means background, 255 means foreground.
*/
- (int)getShadowValue NS_SWIFT_NAME(getShadowValue());
//
// void cv::BackgroundSubtractorKNN::setShadowValue(int value)
//
/**
* Sets the shadow value
*/
- (void)setShadowValue:(int)value NS_SWIFT_NAME(setShadowValue(value:));
//
// double cv::BackgroundSubtractorKNN::getShadowThreshold()
//
/**
* Returns the shadow threshold
*
 * A shadow is detected if a pixel is a darker version of the background. The shadow threshold (Tau in
 * the paper) is a threshold defining how much darker the shadow can be. Tau = 0.5 means that if a pixel
 * is more than twice as dark, it is not considered a shadow. See Prati, Mikic, Trivedi and Cucchiara,
 * *Detecting Moving Shadows...*, IEEE PAMI, 2003.
*/
- (double)getShadowThreshold NS_SWIFT_NAME(getShadowThreshold());
//
// void cv::BackgroundSubtractorKNN::setShadowThreshold(double threshold)
//
/**
* Sets the shadow threshold
*/
- (void)setShadowThreshold:(double)threshold NS_SWIFT_NAME(setShadowThreshold(threshold:));
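// A minimal usage sketch (illustrative only): tuning the parameters documented above. `knn` is
// assumed to come from the createBackgroundSubtractorKNN factory mentioned in these docs.
//
//   [knn setHistory:300];
//   [knn setDist2Threshold:600.0];
//   [knn setkNNSamples:2];
//   [knn setDetectShadows:YES];
//   [knn setShadowValue:127];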
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,63 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorLSBP
/**
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* Member of `Bgsegm`
*/
CV_EXPORTS @interface BackgroundSubtractorLSBP : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bgsegm::BackgroundSubtractorLSBP> nativePtrBackgroundSubtractorLSBP;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bgsegm::BackgroundSubtractorLSBP>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bgsegm::BackgroundSubtractorLSBP>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::bgsegm::BackgroundSubtractorLSBP::apply(Mat image, Mat& fgmask, double learningRate = -1)
//
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask learningRate:(double)learningRate NS_SWIFT_NAME(apply(image:fgmask:learningRate:));
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask NS_SWIFT_NAME(apply(image:fgmask:));
//
// void cv::bgsegm::BackgroundSubtractorLSBP::getBackgroundImage(Mat& backgroundImage)
//
- (void)getBackgroundImage:(Mat*)backgroundImage NS_SWIFT_NAME(getBackgroundImage(backgroundImage:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,49 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorLSBPDesc
/**
 * This class is used for calculating the LSBP descriptors.
*
* Member of `Bgsegm`
*/
CV_EXPORTS @interface BackgroundSubtractorLSBPDesc : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bgsegm::BackgroundSubtractorLSBPDesc> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bgsegm::BackgroundSubtractorLSBPDesc>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bgsegm::BackgroundSubtractorLSBPDesc>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,99 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorMOG
/**
* Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
*
* The class implements the algorithm described in CITE: KB2001 .
*
* Member of `Bgsegm`
*/
CV_EXPORTS @interface BackgroundSubtractorMOG : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG> nativePtrBackgroundSubtractorMOG;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::bgsegm::BackgroundSubtractorMOG::getHistory()
//
- (int)getHistory NS_SWIFT_NAME(getHistory());
//
// void cv::bgsegm::BackgroundSubtractorMOG::setHistory(int nframes)
//
- (void)setHistory:(int)nframes NS_SWIFT_NAME(setHistory(nframes:));
//
// int cv::bgsegm::BackgroundSubtractorMOG::getNMixtures()
//
- (int)getNMixtures NS_SWIFT_NAME(getNMixtures());
//
// void cv::bgsegm::BackgroundSubtractorMOG::setNMixtures(int nmix)
//
- (void)setNMixtures:(int)nmix NS_SWIFT_NAME(setNMixtures(nmix:));
//
// double cv::bgsegm::BackgroundSubtractorMOG::getBackgroundRatio()
//
- (double)getBackgroundRatio NS_SWIFT_NAME(getBackgroundRatio());
//
// void cv::bgsegm::BackgroundSubtractorMOG::setBackgroundRatio(double backgroundRatio)
//
- (void)setBackgroundRatio:(double)backgroundRatio NS_SWIFT_NAME(setBackgroundRatio(backgroundRatio:));
//
// double cv::bgsegm::BackgroundSubtractorMOG::getNoiseSigma()
//
- (double)getNoiseSigma NS_SWIFT_NAME(getNoiseSigma());
//
// void cv::bgsegm::BackgroundSubtractorMOG::setNoiseSigma(double noiseSigma)
//
- (void)setNoiseSigma:(double)noiseSigma NS_SWIFT_NAME(setNoiseSigma(noiseSigma:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,314 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#import "opencv2/video/background_segm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BackgroundSubtractor.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BackgroundSubtractorMOG2
/**
* Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
*
* The class implements the Gaussian mixture model background subtraction described in CITE: Zivkovic2004
* and CITE: Zivkovic2006 .
*
* Member of `Video`
*/
CV_EXPORTS @interface BackgroundSubtractorMOG2 : BackgroundSubtractor
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BackgroundSubtractorMOG2> nativePtrBackgroundSubtractorMOG2;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BackgroundSubtractorMOG2>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BackgroundSubtractorMOG2>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::BackgroundSubtractorMOG2::getHistory()
//
/**
* Returns the number of last frames that affect the background model
*/
- (int)getHistory NS_SWIFT_NAME(getHistory());
//
// void cv::BackgroundSubtractorMOG2::setHistory(int history)
//
/**
* Sets the number of last frames that affect the background model
*/
- (void)setHistory:(int)history NS_SWIFT_NAME(setHistory(history:));
//
// int cv::BackgroundSubtractorMOG2::getNMixtures()
//
/**
* Returns the number of gaussian components in the background model
*/
- (int)getNMixtures NS_SWIFT_NAME(getNMixtures());
//
// void cv::BackgroundSubtractorMOG2::setNMixtures(int nmixtures)
//
/**
* Sets the number of gaussian components in the background model.
*
 * The model needs to be reinitialized to reserve memory.
*/
- (void)setNMixtures:(int)nmixtures NS_SWIFT_NAME(setNMixtures(nmixtures:));
//
// double cv::BackgroundSubtractorMOG2::getBackgroundRatio()
//
/**
* Returns the "background ratio" parameter of the algorithm
*
 * If a foreground pixel keeps a semi-constant value for about backgroundRatio\*history frames, it is
 * considered background and added to the model as the center of a new component. It corresponds to the
 * TB parameter in the paper.
*/
- (double)getBackgroundRatio NS_SWIFT_NAME(getBackgroundRatio());
//
// void cv::BackgroundSubtractorMOG2::setBackgroundRatio(double ratio)
//
/**
* Sets the "background ratio" parameter of the algorithm
*/
- (void)setBackgroundRatio:(double)ratio NS_SWIFT_NAME(setBackgroundRatio(ratio:));
//
// double cv::BackgroundSubtractorMOG2::getVarThreshold()
//
/**
* Returns the variance threshold for the pixel-model match
*
* The main threshold on the squared Mahalanobis distance to decide if the sample is well described by
* the background model or not. Related to Cthr from the paper.
*/
- (double)getVarThreshold NS_SWIFT_NAME(getVarThreshold());
//
// void cv::BackgroundSubtractorMOG2::setVarThreshold(double varThreshold)
//
/**
* Sets the variance threshold for the pixel-model match
*/
- (void)setVarThreshold:(double)varThreshold NS_SWIFT_NAME(setVarThreshold(varThreshold:));
//
// double cv::BackgroundSubtractorMOG2::getVarThresholdGen()
//
/**
* Returns the variance threshold for the pixel-model match used for new mixture component generation
*
* Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the
* existing components (corresponds to Tg in the paper). If a pixel is not close to any component, it
 * is considered foreground or added as a new component. 3 sigma =\> Tg=3\*3=9 is the default. A smaller Tg
* value generates more components. A higher Tg value may result in a small number of components but
* they can grow too large.
*/
- (double)getVarThresholdGen NS_SWIFT_NAME(getVarThresholdGen());
//
// void cv::BackgroundSubtractorMOG2::setVarThresholdGen(double varThresholdGen)
//
/**
* Sets the variance threshold for the pixel-model match used for new mixture component generation
*/
- (void)setVarThresholdGen:(double)varThresholdGen NS_SWIFT_NAME(setVarThresholdGen(varThresholdGen:));
//
// double cv::BackgroundSubtractorMOG2::getVarInit()
//
/**
* Returns the initial variance of each gaussian component
*/
- (double)getVarInit NS_SWIFT_NAME(getVarInit());
//
// void cv::BackgroundSubtractorMOG2::setVarInit(double varInit)
//
/**
* Sets the initial variance of each gaussian component
*/
- (void)setVarInit:(double)varInit NS_SWIFT_NAME(setVarInit(varInit:));
//
// double cv::BackgroundSubtractorMOG2::getVarMin()
//
- (double)getVarMin NS_SWIFT_NAME(getVarMin());
//
// void cv::BackgroundSubtractorMOG2::setVarMin(double varMin)
//
- (void)setVarMin:(double)varMin NS_SWIFT_NAME(setVarMin(varMin:));
//
// double cv::BackgroundSubtractorMOG2::getVarMax()
//
- (double)getVarMax NS_SWIFT_NAME(getVarMax());
//
// void cv::BackgroundSubtractorMOG2::setVarMax(double varMax)
//
- (void)setVarMax:(double)varMax NS_SWIFT_NAME(setVarMax(varMax:));
//
// double cv::BackgroundSubtractorMOG2::getComplexityReductionThreshold()
//
/**
* Returns the complexity reduction threshold
*
 * This parameter defines the number of samples needed to accept that a component actually exists. CT=0.05
 * is the default value for all the samples. By setting CT=0 you get an algorithm very similar to the
* standard Stauffer&Grimson algorithm.
*/
- (double)getComplexityReductionThreshold NS_SWIFT_NAME(getComplexityReductionThreshold());
//
// void cv::BackgroundSubtractorMOG2::setComplexityReductionThreshold(double ct)
//
/**
* Sets the complexity reduction threshold
*/
- (void)setComplexityReductionThreshold:(double)ct NS_SWIFT_NAME(setComplexityReductionThreshold(ct:));
//
// bool cv::BackgroundSubtractorMOG2::getDetectShadows()
//
/**
* Returns the shadow detection flag
*
* If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorMOG2 for
* details.
*/
- (BOOL)getDetectShadows NS_SWIFT_NAME(getDetectShadows());
//
// void cv::BackgroundSubtractorMOG2::setDetectShadows(bool detectShadows)
//
/**
* Enables or disables shadow detection
*/
- (void)setDetectShadows:(BOOL)detectShadows NS_SWIFT_NAME(setDetectShadows(detectShadows:));
//
// int cv::BackgroundSubtractorMOG2::getShadowValue()
//
/**
* Returns the shadow value
*
* Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0
* in the mask always means background, 255 means foreground.
*/
- (int)getShadowValue NS_SWIFT_NAME(getShadowValue());
//
// void cv::BackgroundSubtractorMOG2::setShadowValue(int value)
//
/**
* Sets the shadow value
*/
- (void)setShadowValue:(int)value NS_SWIFT_NAME(setShadowValue(value:));
//
// double cv::BackgroundSubtractorMOG2::getShadowThreshold()
//
/**
* Returns the shadow threshold
*
 * A shadow is detected if a pixel is a darker version of the background. The shadow threshold (Tau in
 * the paper) is a threshold defining how much darker the shadow can be. Tau = 0.5 means that if a pixel
 * is more than twice as dark, it is not considered a shadow. See Prati, Mikic, Trivedi and Cucchiara,
 * *Detecting Moving Shadows...*, IEEE PAMI, 2003.
*/
- (double)getShadowThreshold NS_SWIFT_NAME(getShadowThreshold());
//
// void cv::BackgroundSubtractorMOG2::setShadowThreshold(double threshold)
//
/**
* Sets the shadow threshold
*/
- (void)setShadowThreshold:(double)threshold NS_SWIFT_NAME(setShadowThreshold(threshold:));
//
// void cv::BackgroundSubtractorMOG2::apply(Mat image, Mat& fgmask, double learningRate = -1)
//
/**
* Computes a foreground mask.
*
* @param image Next video frame. Floating point frame will be used without scaling and should be in range `$$[0,255]$$`.
* @param fgmask The output foreground mask as an 8-bit binary image.
* @param learningRate The value between 0 and 1 that indicates how fast the background model is
 * learnt. A negative parameter value makes the algorithm use an automatically chosen learning
* rate. 0 means that the background model is not updated at all, 1 means that the background model
* is completely reinitialized from the last frame.
*/
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask learningRate:(double)learningRate NS_SWIFT_NAME(apply(image:fgmask:learningRate:));
/**
* Computes a foreground mask.
*
* @param image Next video frame. Floating point frame will be used without scaling and should be in range `$$[0,255]$$`.
* @param fgmask The output foreground mask as an 8-bit binary image.
 *
 * learningRate defaults to -1 (an automatically chosen learning rate).
*/
- (void)apply:(Mat*)image fgmask:(Mat*)fgmask NS_SWIFT_NAME(apply(image:fgmask:));
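// A minimal usage sketch (illustrative only): configuring and running the subtractor. `mog2` is
// assumed to come from the createBackgroundSubtractorMOG2 factory mentioned in these docs, and
// `frame` is the next video frame as a Mat.
//
//   [mog2 setVarThreshold:25.0];
//   [mog2 setDetectShadows:YES];
//   [mog2 setShadowValue:127];
//   Mat *fgmask = [Mat new];
//   [mog2 apply:frame fgmask:fgmask learningRate:-1];   // -1 lets the algorithm pick the rate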
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,114 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/barcode.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "GraphicalCodeDetector.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BarcodeDetector
/**
* The BarcodeDetector module
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface BarcodeDetector : GraphicalCodeDetector
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::barcode::BarcodeDetector> nativePtrBarcodeDetector;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::barcode::BarcodeDetector>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::barcode::BarcodeDetector>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::barcode::BarcodeDetector::BarcodeDetector()
//
/**
* Initialize the BarcodeDetector.
*/
- (instancetype)init;
//
// cv::barcode::BarcodeDetector::BarcodeDetector(string prototxt_path, string model_path)
//
/**
* Initialize the BarcodeDetector.
*
 * Parameters allow loading an _optional_ Super Resolution DNN model for better quality.
* @param prototxt_path prototxt file path for the super resolution model
* @param model_path model file path for the super resolution model
*/
- (instancetype)initWithPrototxt_path:(NSString*)prototxt_path model_path:(NSString*)model_path;
//
// bool cv::barcode::BarcodeDetector::decodeWithType(Mat img, Mat points, vector_string& decoded_info, vector_string& decoded_type)
//
/**
* Decodes barcode in image once it's found by the detect() method.
*
 * @param img grayscale or color (BGR) image containing a barcode.
 * @param points vector of rotated rectangle vertices found by the detect() method (or some other algorithm).
 * For N detected barcodes, the dimensions of this array should be [N][4].
 * Order of the four points in each vector<Point2f> is bottomLeft, topLeft, topRight, bottomRight.
 * @param decoded_info UTF8-encoded output vector of strings, or an empty vector if the codes cannot be decoded.
 * @param decoded_type vector of strings specifying the types of the barcodes
 * @return true if at least one valid barcode has been found
*/
- (BOOL)decodeWithType:(Mat*)img points:(Mat*)points decoded_info:(NSMutableArray<NSString*>*)decoded_info decoded_type:(NSMutableArray<NSString*>*)decoded_type NS_SWIFT_NAME(decodeWithType(img:points:decoded_info:decoded_type:));
//
// bool cv::barcode::BarcodeDetector::detectAndDecodeWithType(Mat img, vector_string& decoded_info, vector_string& decoded_type, Mat& points = Mat())
//
/**
* Both detects and decodes barcode
*
 * @param img grayscale or color (BGR) image containing a barcode.
 * @param decoded_info UTF8-encoded output vector of strings, or an empty vector if the codes cannot be decoded.
 * @param decoded_type vector of strings specifying the types of the barcodes
 * @param points optional output vector of vertices of the found barcode rectangle. Will be empty if not found.
 * @return true if at least one valid barcode has been found
*/
- (BOOL)detectAndDecodeWithType:(Mat*)img decoded_info:(NSMutableArray<NSString*>*)decoded_info decoded_type:(NSMutableArray<NSString*>*)decoded_type points:(Mat*)points NS_SWIFT_NAME(detectAndDecodeWithType(img:decoded_info:decoded_type:points:));
/**
* Both detects and decodes barcode
*
 * @param img grayscale or color (BGR) image containing a barcode.
 * @param decoded_info UTF8-encoded output vector of strings, or an empty vector if the codes cannot be decoded.
 * @param decoded_type vector of strings specifying the types of the barcodes
 * @return true if at least one valid barcode has been found
*/
- (BOOL)detectAndDecodeWithType:(Mat*)img decoded_info:(NSMutableArray<NSString*>*)decoded_info decoded_type:(NSMutableArray<NSString*>*)decoded_type NS_SWIFT_NAME(detectAndDecodeWithType(img:decoded_info:decoded_type:));
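//
// Usage sketch (illustrative only; `image` is assumed to be a Mat loaded
// elsewhere, e.g. a camera frame or a decoded image file):
//
//   BarcodeDetector *detector = [[BarcodeDetector alloc] init];
//   NSMutableArray<NSString*> *info = [NSMutableArray array];
//   NSMutableArray<NSString*> *types = [NSMutableArray array];
//   Mat *points = [[Mat alloc] init];
//   BOOL found = [detector detectAndDecodeWithType:image
//                                      decoded_info:info
//                                      decoded_type:types
//                                            points:points];
//   // when found == YES, info[i] / types[i] describe the i-th barcode and
//   // points holds the vertices of its bounding rectangle
//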
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,49 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BaseCascadeClassifier
/**
* The BaseCascadeClassifier module
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface BaseCascadeClassifier : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::BaseCascadeClassifier> nativePtrBaseCascadeClassifier;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::BaseCascadeClassifier>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::BaseCascadeClassifier>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,50 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/text.hpp"
#import "opencv2/text/ocr.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class BaseOCR
/**
* The BaseOCR module
*
* Member of `Text`
*/
CV_EXPORTS @interface BaseOCR : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::text::BaseOCR> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::text::BaseOCR>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::text::BaseOCR>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,116 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/facerec.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "FaceRecognizer.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class BasicFaceRecognizer
/**
* The BasicFaceRecognizer module
*
* Member of `Face`
*/
CV_EXPORTS @interface BasicFaceRecognizer : FaceRecognizer
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::BasicFaceRecognizer> nativePtrBasicFaceRecognizer;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::BasicFaceRecognizer>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::BasicFaceRecognizer>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::face::BasicFaceRecognizer::getNumComponents()
//
/**
* @see `-setNumComponents:`
*/
- (int)getNumComponents NS_SWIFT_NAME(getNumComponents());
//
// void cv::face::BasicFaceRecognizer::setNumComponents(int val)
//
/**
* getNumComponents @see `-getNumComponents:`
*/
- (void)setNumComponents:(int)val NS_SWIFT_NAME(setNumComponents(val:));
//
// double cv::face::BasicFaceRecognizer::getThreshold()
//
/**
* @see `-setThreshold:`
*/
- (double)getThreshold NS_SWIFT_NAME(getThreshold());
//
// void cv::face::BasicFaceRecognizer::setThreshold(double val)
//
/**
* getThreshold @see `-getThreshold:`
*/
- (void)setThreshold:(double)val NS_SWIFT_NAME(setThreshold(val:));
//
// vector_Mat cv::face::BasicFaceRecognizer::getProjections()
//
- (NSArray<Mat*>*)getProjections NS_SWIFT_NAME(getProjections());
//
// Mat cv::face::BasicFaceRecognizer::getLabels()
//
- (Mat*)getLabels NS_SWIFT_NAME(getLabels());
//
// Mat cv::face::BasicFaceRecognizer::getEigenValues()
//
- (Mat*)getEigenValues NS_SWIFT_NAME(getEigenValues());
//
// Mat cv::face::BasicFaceRecognizer::getEigenVectors()
//
- (Mat*)getEigenVectors NS_SWIFT_NAME(getEigenVectors());
//
// Mat cv::face::BasicFaceRecognizer::getMean()
//
- (Mat*)getMean NS_SWIFT_NAME(getMean());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,597 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bgsegm.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class BackgroundSubtractorCNT;
@class BackgroundSubtractorGMG;
@class BackgroundSubtractorGSOC;
@class BackgroundSubtractorLSBP;
@class BackgroundSubtractorMOG;
@class Mat;
@class SyntheticSequenceGenerator;
// C++: enum LSBPCameraMotionCompensation (cv.bgsegm.LSBPCameraMotionCompensation)
typedef NS_ENUM(int, LSBPCameraMotionCompensation) {
LSBP_CAMERA_MOTION_COMPENSATION_NONE = 0,
LSBP_CAMERA_MOTION_COMPENSATION_LK = 0+1
};
NS_ASSUME_NONNULL_BEGIN
// C++: class Bgsegm
/**
* The Bgsegm module
*
* Member classes: `BackgroundSubtractorMOG`, `BackgroundSubtractorGMG`, `BackgroundSubtractorCNT`, `BackgroundSubtractorGSOC`, `BackgroundSubtractorLSBP`, `BackgroundSubtractorLSBPDesc`, `SyntheticSequenceGenerator`
*
* Member enums: `LSBPCameraMotionCompensation`
*/
CV_EXPORTS @interface Bgsegm : NSObject
#pragma mark - Methods
//
// Ptr_BackgroundSubtractorMOG cv::bgsegm::createBackgroundSubtractorMOG(int history = 200, int nmixtures = 5, double backgroundRatio = 0.7, double noiseSigma = 0)
//
/**
* Creates mixture-of-gaussian background subtractor
*
* @param history Length of the history.
* @param nmixtures Number of Gaussian mixtures.
* @param backgroundRatio Background ratio.
 * @param noiseSigma Noise strength (standard deviation of the brightness of each color channel). 0
* means some automatic value.
*/
+ (BackgroundSubtractorMOG*)createBackgroundSubtractorMOG:(int)history nmixtures:(int)nmixtures backgroundRatio:(double)backgroundRatio noiseSigma:(double)noiseSigma NS_SWIFT_NAME(createBackgroundSubtractorMOG(history:nmixtures:backgroundRatio:noiseSigma:));
/**
* Creates mixture-of-gaussian background subtractor
*
* @param history Length of the history.
* @param nmixtures Number of Gaussian mixtures.
* @param backgroundRatio Background ratio.
 * Remaining parameters take their default values.
*/
+ (BackgroundSubtractorMOG*)createBackgroundSubtractorMOG:(int)history nmixtures:(int)nmixtures backgroundRatio:(double)backgroundRatio NS_SWIFT_NAME(createBackgroundSubtractorMOG(history:nmixtures:backgroundRatio:));
/**
* Creates mixture-of-gaussian background subtractor
*
* @param history Length of the history.
* @param nmixtures Number of Gaussian mixtures.
 * Remaining parameters take their default values.
*/
+ (BackgroundSubtractorMOG*)createBackgroundSubtractorMOG:(int)history nmixtures:(int)nmixtures NS_SWIFT_NAME(createBackgroundSubtractorMOG(history:nmixtures:));
/**
* Creates mixture-of-gaussian background subtractor
*
* @param history Length of the history.
 * Remaining parameters take their default values.
*/
+ (BackgroundSubtractorMOG*)createBackgroundSubtractorMOG:(int)history NS_SWIFT_NAME(createBackgroundSubtractorMOG(history:));
/**
* Creates mixture-of-gaussian background subtractor
*
 * All parameters take their default values.
*/
+ (BackgroundSubtractorMOG*)createBackgroundSubtractorMOG NS_SWIFT_NAME(createBackgroundSubtractorMOG());
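//
// Usage sketch (illustrative only; the apply: call comes from the
// BackgroundSubtractor base class and `frame` is assumed to be the next
// video frame obtained elsewhere):
//
//   BackgroundSubtractorMOG *mog =
//       [Bgsegm createBackgroundSubtractorMOG:200 nmixtures:5
//                             backgroundRatio:0.7 noiseSigma:0];
//   Mat *fgMask = [[Mat alloc] init];
//   [mog apply:frame fgmask:fgMask learningRate:-1];
//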
//
// Ptr_BackgroundSubtractorGMG cv::bgsegm::createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8)
//
/**
* Creates a GMG Background Subtractor
*
* @param initializationFrames number of frames used to initialize the background models.
 * @param decisionThreshold Threshold value above which a pixel is marked as foreground, otherwise background.
*/
+ (BackgroundSubtractorGMG*)createBackgroundSubtractorGMG:(int)initializationFrames decisionThreshold:(double)decisionThreshold NS_SWIFT_NAME(createBackgroundSubtractorGMG(initializationFrames:decisionThreshold:));
/**
* Creates a GMG Background Subtractor
*
* @param initializationFrames number of frames used to initialize the background models.
*/
+ (BackgroundSubtractorGMG*)createBackgroundSubtractorGMG:(int)initializationFrames NS_SWIFT_NAME(createBackgroundSubtractorGMG(initializationFrames:));
/**
* Creates a GMG Background Subtractor
*
*/
+ (BackgroundSubtractorGMG*)createBackgroundSubtractorGMG NS_SWIFT_NAME(createBackgroundSubtractorGMG());
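//
// Usage sketch (illustrative only; `frame` is the next video frame and
// apply: is inherited from the BackgroundSubtractor base class):
//
//   BackgroundSubtractorGMG *gmg =
//       [Bgsegm createBackgroundSubtractorGMG:120 decisionThreshold:0.8];
//   Mat *fgMask = [[Mat alloc] init];
//   [gmg apply:frame fgmask:fgMask learningRate:-1];
//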
//
// Ptr_BackgroundSubtractorCNT cv::bgsegm::createBackgroundSubtractorCNT(int minPixelStability = 15, bool useHistory = true, int maxPixelStability = 15*60, bool isParallel = true)
//
/**
* Creates a CNT Background Subtractor
*
* @param minPixelStability number of frames with same pixel color to consider stable
* @param useHistory determines if we're giving a pixel credit for being stable for a long time
* @param maxPixelStability maximum allowed credit for a pixel in history
* @param isParallel determines if we're parallelizing the algorithm
*/
+ (BackgroundSubtractorCNT*)createBackgroundSubtractorCNT:(int)minPixelStability useHistory:(BOOL)useHistory maxPixelStability:(int)maxPixelStability isParallel:(BOOL)isParallel NS_SWIFT_NAME(createBackgroundSubtractorCNT(minPixelStability:useHistory:maxPixelStability:isParallel:));
/**
* Creates a CNT Background Subtractor
*
* @param minPixelStability number of frames with same pixel color to consider stable
* @param useHistory determines if we're giving a pixel credit for being stable for a long time
* @param maxPixelStability maximum allowed credit for a pixel in history
*/
+ (BackgroundSubtractorCNT*)createBackgroundSubtractorCNT:(int)minPixelStability useHistory:(BOOL)useHistory maxPixelStability:(int)maxPixelStability NS_SWIFT_NAME(createBackgroundSubtractorCNT(minPixelStability:useHistory:maxPixelStability:));
/**
* Creates a CNT Background Subtractor
*
* @param minPixelStability number of frames with same pixel color to consider stable
* @param useHistory determines if we're giving a pixel credit for being stable for a long time
*/
+ (BackgroundSubtractorCNT*)createBackgroundSubtractorCNT:(int)minPixelStability useHistory:(BOOL)useHistory NS_SWIFT_NAME(createBackgroundSubtractorCNT(minPixelStability:useHistory:));
/**
* Creates a CNT Background Subtractor
*
* @param minPixelStability number of frames with same pixel color to consider stable
*/
+ (BackgroundSubtractorCNT*)createBackgroundSubtractorCNT:(int)minPixelStability NS_SWIFT_NAME(createBackgroundSubtractorCNT(minPixelStability:));
/**
* Creates a CNT Background Subtractor
*
*/
+ (BackgroundSubtractorCNT*)createBackgroundSubtractorCNT NS_SWIFT_NAME(createBackgroundSubtractorCNT());
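//
// Usage sketch (illustrative only; the values shown are the documented
// defaults; `frame` and the inherited apply: method as in the examples above):
//
//   BackgroundSubtractorCNT *cnt =
//       [Bgsegm createBackgroundSubtractorCNT:15 useHistory:YES
//                           maxPixelStability:15*60 isParallel:YES];
//   Mat *fgMask = [[Mat alloc] init];
//   [cnt apply:frame fgmask:fgMask learningRate:-1];
//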
//
// Ptr_BackgroundSubtractorGSOC cv::bgsegm::createBackgroundSubtractorGSOC(LSBPCameraMotionCompensation mc = LSBP_CAMERA_MOTION_COMPENSATION_NONE, int nSamples = 20, float replaceRate = 0.003f, float propagationRate = 0.01f, int hitsThreshold = 32, float alpha = 0.01f, float beta = 0.0022f, float blinkingSupressionDecay = 0.1f, float blinkingSupressionMultiplier = 0.1f, float noiseRemovalThresholdFacBG = 0.0004f, float noiseRemovalThresholdFacFG = 0.0008f)
//
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
* @param alpha Scale coefficient for threshold.
* @param beta Bias coefficient for threshold.
 * @param blinkingSupressionDecay Blinking suppression decay factor.
 * @param blinkingSupressionMultiplier Blinking suppression multiplier.
* @param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
* @param noiseRemovalThresholdFacFG Strength of the noise removal for foreground points.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold alpha:(float)alpha beta:(float)beta blinkingSupressionDecay:(float)blinkingSupressionDecay blinkingSupressionMultiplier:(float)blinkingSupressionMultiplier noiseRemovalThresholdFacBG:(float)noiseRemovalThresholdFacBG noiseRemovalThresholdFacFG:(float)noiseRemovalThresholdFacFG NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:alpha:beta:blinkingSupressionDecay:blinkingSupressionMultiplier:noiseRemovalThresholdFacBG:noiseRemovalThresholdFacFG:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
* @param alpha Scale coefficient for threshold.
* @param beta Bias coefficient for threshold.
 * @param blinkingSupressionDecay Blinking suppression decay factor.
 * @param blinkingSupressionMultiplier Blinking suppression multiplier.
* @param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold alpha:(float)alpha beta:(float)beta blinkingSupressionDecay:(float)blinkingSupressionDecay blinkingSupressionMultiplier:(float)blinkingSupressionMultiplier noiseRemovalThresholdFacBG:(float)noiseRemovalThresholdFacBG NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:alpha:beta:blinkingSupressionDecay:blinkingSupressionMultiplier:noiseRemovalThresholdFacBG:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
* @param alpha Scale coefficient for threshold.
* @param beta Bias coefficient for threshold.
 * @param blinkingSupressionDecay Blinking suppression decay factor.
 * @param blinkingSupressionMultiplier Blinking suppression multiplier.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold alpha:(float)alpha beta:(float)beta blinkingSupressionDecay:(float)blinkingSupressionDecay blinkingSupressionMultiplier:(float)blinkingSupressionMultiplier NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:alpha:beta:blinkingSupressionDecay:blinkingSupressionMultiplier:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
* @param alpha Scale coefficient for threshold.
* @param beta Bias coefficient for threshold.
 * @param blinkingSupressionDecay Blinking suppression decay factor.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold alpha:(float)alpha beta:(float)beta blinkingSupressionDecay:(float)blinkingSupressionDecay NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:alpha:beta:blinkingSupressionDecay:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
* @param alpha Scale coefficient for threshold.
* @param beta Bias coefficient for threshold.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold alpha:(float)alpha beta:(float)beta NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:alpha:beta:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
* @param alpha Scale coefficient for threshold.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold alpha:(float)alpha NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:alpha:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
* @param hitsThreshold How many positives the sample must get before it will be considered as a possible replacement.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate hitsThreshold:(int)hitsThreshold NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:hitsThreshold:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
* @param propagationRate Probability of propagating to neighbors.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate propagationRate:(float)propagationRate NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:propagationRate:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param replaceRate Probability of replacing the old sample - how fast the model will update itself.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples replaceRate:(float)replaceRate NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:replaceRate:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:nSamples:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
* @param mc Whether to use camera motion compensation.
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC:(LSBPCameraMotionCompensation)mc NS_SWIFT_NAME(createBackgroundSubtractorGSOC(mc:));
/**
* Creates an instance of BackgroundSubtractorGSOC algorithm.
*
 * Implementation of a different yet better algorithm, called GSOC since it was implemented during the Google Summer of Code and does not originate from any paper.
*
*/
+ (BackgroundSubtractorGSOC*)createBackgroundSubtractorGSOC NS_SWIFT_NAME(createBackgroundSubtractorGSOC());
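//
// Usage sketch (illustrative only; only the first two parameters are set
// explicitly and the rest keep their defaults; `frame` is the next video
// frame and apply: is inherited from the BackgroundSubtractor base class):
//
//   BackgroundSubtractorGSOC *gsoc =
//       [Bgsegm createBackgroundSubtractorGSOC:LSBP_CAMERA_MOTION_COMPENSATION_NONE
//                                     nSamples:20];
//   Mat *fgMask = [[Mat alloc] init];
//   [gsoc apply:frame fgmask:fgMask learningRate:-1];
//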
//
// Ptr_BackgroundSubtractorLSBP cv::bgsegm::createBackgroundSubtractorLSBP(LSBPCameraMotionCompensation mc = LSBP_CAMERA_MOTION_COMPENSATION_NONE, int nSamples = 20, int LSBPRadius = 16, float Tlower = 2.0f, float Tupper = 32.0f, float Tinc = 1.0f, float Tdec = 0.05f, float Rscale = 10.0f, float Rincdec = 0.005f, float noiseRemovalThresholdFacBG = 0.0004f, float noiseRemovalThresholdFacFG = 0.0008f, int LSBPthreshold = 8, int minCount = 2)
//
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
* @param Rscale Scale coefficient for threshold values.
* @param Rincdec Increase/Decrease step for threshold values.
* @param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
* @param noiseRemovalThresholdFacFG Strength of the noise removal for foreground points.
* @param LSBPthreshold Threshold for LSBP binary string.
 * @param minCount Minimal number of matches for a sample to be considered as foreground.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec Rscale:(float)Rscale Rincdec:(float)Rincdec noiseRemovalThresholdFacBG:(float)noiseRemovalThresholdFacBG noiseRemovalThresholdFacFG:(float)noiseRemovalThresholdFacFG LSBPthreshold:(int)LSBPthreshold minCount:(int)minCount NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:Rscale:Rincdec:noiseRemovalThresholdFacBG:noiseRemovalThresholdFacFG:LSBPthreshold:minCount:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
* @param Rscale Scale coefficient for threshold values.
* @param Rincdec Increase/Decrease step for threshold values.
* @param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
* @param noiseRemovalThresholdFacFG Strength of the noise removal for foreground points.
* @param LSBPthreshold Threshold for LSBP binary string.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec Rscale:(float)Rscale Rincdec:(float)Rincdec noiseRemovalThresholdFacBG:(float)noiseRemovalThresholdFacBG noiseRemovalThresholdFacFG:(float)noiseRemovalThresholdFacFG LSBPthreshold:(int)LSBPthreshold NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:Rscale:Rincdec:noiseRemovalThresholdFacBG:noiseRemovalThresholdFacFG:LSBPthreshold:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
* @param Rscale Scale coefficient for threshold values.
* @param Rincdec Increase/Decrease step for threshold values.
* @param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
* @param noiseRemovalThresholdFacFG Strength of the noise removal for foreground points.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec Rscale:(float)Rscale Rincdec:(float)Rincdec noiseRemovalThresholdFacBG:(float)noiseRemovalThresholdFacBG noiseRemovalThresholdFacFG:(float)noiseRemovalThresholdFacFG NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:Rscale:Rincdec:noiseRemovalThresholdFacBG:noiseRemovalThresholdFacFG:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
* @param Rscale Scale coefficient for threshold values.
* @param Rincdec Increase/Decrease step for threshold values.
* @param noiseRemovalThresholdFacBG Strength of the noise removal for background points.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec Rscale:(float)Rscale Rincdec:(float)Rincdec noiseRemovalThresholdFacBG:(float)noiseRemovalThresholdFacBG NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:Rscale:Rincdec:noiseRemovalThresholdFacBG:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
* @param Rscale Scale coefficient for threshold values.
* @param Rincdec Increase/Decrease step for threshold values.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec Rscale:(float)Rscale Rincdec:(float)Rincdec NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:Rscale:Rincdec:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
* @param Rscale Scale coefficient for threshold values.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec Rscale:(float)Rscale NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:Rscale:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
* @param Tdec Decrease step for T-values. See CITE: LGuo2016 for details.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc Tdec:(float)Tdec NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:Tdec:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
* @param Tinc Increase step for T-values. See CITE: LGuo2016 for details.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper Tinc:(float)Tinc NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:Tinc:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
* @param Tupper Upper bound for T-values. See CITE: LGuo2016 for details.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower Tupper:(float)Tupper NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:Tupper:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
* @param Tlower Lower bound for T-values. See CITE: LGuo2016 for details.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius Tlower:(float)Tlower NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:Tlower:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
* @param LSBPRadius LSBP descriptor radius.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples LSBPRadius:(int)LSBPRadius NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:LSBPRadius:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
* @param nSamples Number of samples to maintain at each point of the frame.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc nSamples:(int)nSamples NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:nSamples:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
* @param mc Whether to use camera motion compensation.
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP:(LSBPCameraMotionCompensation)mc NS_SWIFT_NAME(createBackgroundSubtractorLSBP(mc:));
/**
* Creates an instance of BackgroundSubtractorLSBP algorithm.
*
* Background Subtraction using Local SVD Binary Pattern. More details about the algorithm can be found at CITE: LGuo2016
*
*/
+ (BackgroundSubtractorLSBP*)createBackgroundSubtractorLSBP NS_SWIFT_NAME(createBackgroundSubtractorLSBP());
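//
// Usage sketch (illustrative only; LK camera motion compensation with all
// other parameters left at their defaults; `frame` is the next video frame
// and apply: is inherited from the BackgroundSubtractor base class):
//
//   BackgroundSubtractorLSBP *lsbp =
//       [Bgsegm createBackgroundSubtractorLSBP:LSBP_CAMERA_MOTION_COMPENSATION_LK];
//   Mat *fgMask = [[Mat alloc] init];
//   [lsbp apply:frame fgmask:fgMask learningRate:-1];
//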
//
// Ptr_SyntheticSequenceGenerator cv::bgsegm::createSyntheticSequenceGenerator(Mat background, Mat object, double amplitude = 2.0, double wavelength = 20.0, double wavespeed = 0.2, double objspeed = 6.0)
//
/**
* Creates an instance of SyntheticSequenceGenerator.
*
* @param background Background image for object.
* @param object Object image which will move slowly over the background.
* @param amplitude Amplitude of wave distortion applied to background.
* @param wavelength Length of waves in distortion applied to background.
* @param wavespeed How fast waves will move.
* @param objspeed How fast object will fly over background.
*/
+ (SyntheticSequenceGenerator*)createSyntheticSequenceGenerator:(Mat*)background object:(Mat*)object amplitude:(double)amplitude wavelength:(double)wavelength wavespeed:(double)wavespeed objspeed:(double)objspeed NS_SWIFT_NAME(createSyntheticSequenceGenerator(background:object:amplitude:wavelength:wavespeed:objspeed:));
/**
* Creates an instance of SyntheticSequenceGenerator.
*
* @param background Background image for object.
* @param object Object image which will move slowly over the background.
* @param amplitude Amplitude of wave distortion applied to background.
* @param wavelength Length of waves in distortion applied to background.
* @param wavespeed How fast waves will move.
*/
+ (SyntheticSequenceGenerator*)createSyntheticSequenceGenerator:(Mat*)background object:(Mat*)object amplitude:(double)amplitude wavelength:(double)wavelength wavespeed:(double)wavespeed NS_SWIFT_NAME(createSyntheticSequenceGenerator(background:object:amplitude:wavelength:wavespeed:));
/**
* Creates an instance of SyntheticSequenceGenerator.
*
* @param background Background image for object.
* @param object Object image which will move slowly over the background.
* @param amplitude Amplitude of wave distortion applied to background.
* @param wavelength Length of waves in distortion applied to background.
*/
+ (SyntheticSequenceGenerator*)createSyntheticSequenceGenerator:(Mat*)background object:(Mat*)object amplitude:(double)amplitude wavelength:(double)wavelength NS_SWIFT_NAME(createSyntheticSequenceGenerator(background:object:amplitude:wavelength:));
/**
* Creates an instance of SyntheticSequenceGenerator.
*
* @param background Background image for object.
* @param object Object image which will move slowly over the background.
* @param amplitude Amplitude of wave distortion applied to background.
*/
+ (SyntheticSequenceGenerator*)createSyntheticSequenceGenerator:(Mat*)background object:(Mat*)object amplitude:(double)amplitude NS_SWIFT_NAME(createSyntheticSequenceGenerator(background:object:amplitude:));
/**
* Creates an instance of SyntheticSequenceGenerator.
*
* @param background Background image for object.
* @param object Object image which will move slowly over the background.
*/
+ (SyntheticSequenceGenerator*)createSyntheticSequenceGenerator:(Mat*)background object:(Mat*)object NS_SWIFT_NAME(createSyntheticSequenceGenerator(background:object:));
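//
// Usage sketch (illustrative only; `bg` and `obj` are assumed to be Mats
// holding the background and the moving object; subsequent frames and
// ground-truth masks are produced by methods of SyntheticSequenceGenerator
// itself, which are not declared in this header):
//
//   SyntheticSequenceGenerator *gen =
//       [Bgsegm createSyntheticSequenceGenerator:bg object:obj
//                                      amplitude:2.0 wavelength:20.0
//                                      wavespeed:0.2 objspeed:6.0];
//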
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,45 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bioinspired.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class Bioinspired
/**
* The Bioinspired module
*
* Member classes: `TransientAreasSegmentationModule`, `Retina`, `RetinaFastToneMapping`
*
*/
CV_EXPORTS @interface Bioinspired : NSObject
#pragma mark - Class Constants
@property (class, readonly) int RETINA_COLOR_RANDOM NS_SWIFT_NAME(RETINA_COLOR_RANDOM);
@property (class, readonly) int RETINA_COLOR_DIAGONAL NS_SWIFT_NAME(RETINA_COLOR_DIAGONAL);
@property (class, readonly) int RETINA_COLOR_BAYER NS_SWIFT_NAME(RETINA_COLOR_BAYER);
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,77 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/img_hash.hpp"
#import "opencv2/img_hash/block_mean_hash.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "ImgHashBase.h"
#import "Img_hash.h"
@class DoubleVector;
NS_ASSUME_NONNULL_BEGIN
// C++: class BlockMeanHash
/**
* Image hash based on block mean.
*
* See CITE: zauner2010implementation for details.
*
* Member of `Img_hash`
*/
CV_EXPORTS @interface BlockMeanHash : ImgHashBase
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::img_hash::BlockMeanHash> nativePtrBlockMeanHash;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::img_hash::BlockMeanHash>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::img_hash::BlockMeanHash>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::img_hash::BlockMeanHash::setMode(BlockMeanHashMode mode)
//
/**
 * Set the hashing mode of the BlockMeanHash object
 * @param mode the block mean hash mode
*/
- (void)setMode:(BlockMeanHashMode)mode NS_SWIFT_NAME(setMode(mode:));
//
// vector_double cv::img_hash::BlockMeanHash::getMean()
//
- (DoubleVector*)getMean NS_SWIFT_NAME(getMean());
//
// static Ptr_BlockMeanHash cv::img_hash::BlockMeanHash::create(BlockMeanHashMode mode = BLOCK_MEAN_HASH_MODE_0)
//
+ (BlockMeanHash*)create:(BlockMeanHashMode)mode NS_SWIFT_NAME(create(mode:));
+ (BlockMeanHash*)create NS_SWIFT_NAME(create());
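//
// Usage sketch (illustrative only; BLOCK_MEAN_HASH_MODE_0 is the default
// mode; the hash itself is computed with the compute method inherited from
// ImgHashBase, which is not declared in this header):
//
//   BlockMeanHash *hasher = [BlockMeanHash create:BLOCK_MEAN_HASH_MODE_0];
//   // per-block mean values of the last hashed image are then available
//   // via [hasher getMean]
//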
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,184 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/aruco_board.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Dictionary;
@class IntVector;
@class Mat;
@class Point3f;
@class Size2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class Board
/**
* Board of ArUco markers
*
* A board is a set of markers in the 3D space with a common coordinate system.
 * The common form of a board of markers is a planar (2D) board, however any 3D layout can be used.
 * A Board object is composed of:
 * - The object points of the marker corners, i.e. their coordinates with respect to the board coordinate system.
 * - The dictionary which indicates the type of markers of the board
 * - The identifiers of all the markers in the board.
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface Board : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::Board> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::Board>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::Board>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::Board::Board(vector_Mat objPoints, Dictionary dictionary, Mat ids)
//
/**
* Common Board constructor
*
* @param objPoints array of object points of all the marker corners in the board
* @param dictionary the dictionary of markers employed for this board
* @param ids vector of the identifiers of the markers in the board
*/
- (instancetype)initWithObjPoints:(NSArray<Mat*>*)objPoints dictionary:(Dictionary*)dictionary ids:(Mat*)ids;
//
// Dictionary cv::aruco::Board::getDictionary()
//
/**
* return the Dictionary of markers employed for this board
*/
- (Dictionary*)getDictionary NS_SWIFT_NAME(getDictionary());
//
// vector_vector_Point3f cv::aruco::Board::getObjPoints()
//
/**
* return array of object points of all the marker corners in the board.
*
 * Each marker includes its 4 corners in this order:
* - objPoints[i][0] - left-top point of i-th marker
* - objPoints[i][1] - right-top point of i-th marker
* - objPoints[i][2] - right-bottom point of i-th marker
* - objPoints[i][3] - left-bottom point of i-th marker
*
* Markers are placed in a certain order - row by row, left to right in every row. For M markers, the size is Mx4.
*/
- (NSArray<NSArray<Point3f*>*>*)getObjPoints NS_SWIFT_NAME(getObjPoints());
//
// vector_int cv::aruco::Board::getIds()
//
/**
* vector of the identifiers of the markers in the board (should be the same size as objPoints)
* @return vector of the identifiers of the markers
*/
- (IntVector*)getIds NS_SWIFT_NAME(getIds());
//
// Point3f cv::aruco::Board::getRightBottomCorner()
//
/**
 * Get the coordinate of the bottom-right corner of the board; it is set when calling the function create()
*/
- (Point3f*)getRightBottomCorner NS_SWIFT_NAME(getRightBottomCorner());
//
// void cv::aruco::Board::matchImagePoints(vector_Mat detectedCorners, Mat detectedIds, Mat& objPoints, Mat& imgPoints)
//
/**
 * Given a board configuration and a set of detected markers, returns the corresponding
 * image points and object points, which can be used in solvePnP()
*
* @param detectedCorners List of detected marker corners of the board.
* For cv::Board and cv::GridBoard the method expects std::vector<std::vector<Point2f>> or std::vector<Mat> with Aruco marker corners.
* For cv::CharucoBoard the method expects std::vector<Point2f> or Mat with ChAruco corners (chess board corners matched with Aruco markers).
*
* @param detectedIds List of identifiers for each marker or charuco corner.
* For any Board class the method expects std::vector<int> or Mat.
*
* @param objPoints Vector of marker points in the board coordinate space.
* For any Board class the method expects std::vector<cv::Point3f> objectPoints or cv::Mat
*
* @param imgPoints Vector of marker points in the image coordinate space.
* For any Board class the method expects std::vector<cv::Point2f> objectPoints or cv::Mat
*
* @see `solvePnP`
*/
- (void)matchImagePoints:(NSArray<Mat*>*)detectedCorners detectedIds:(Mat*)detectedIds objPoints:(Mat*)objPoints imgPoints:(Mat*)imgPoints NS_SWIFT_NAME(matchImagePoints(detectedCorners:detectedIds:objPoints:imgPoints:));
//
// void cv::aruco::Board::generateImage(Size outSize, Mat& img, int marginSize = 0, int borderBits = 1)
//
/**
* Draw a planar board
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
 * and the board will be placed in the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
 * This function returns the image of the board, ready to be printed.
*/
- (void)generateImage:(Size2i*)outSize img:(Mat*)img marginSize:(int)marginSize borderBits:(int)borderBits NS_SWIFT_NAME(generateImage(outSize:img:marginSize:borderBits:));
/**
* Draw a planar board
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
 * and the board will be placed in the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
*
 * This function returns the image of the board, ready to be printed.
*/
- (void)generateImage:(Size2i*)outSize img:(Mat*)img marginSize:(int)marginSize NS_SWIFT_NAME(generateImage(outSize:img:marginSize:));
/**
* Draw a planar board
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
 * and the board will be placed in the center, keeping the board proportions.
*
 * This function returns the image of the board, ready to be printed.
*/
- (void)generateImage:(Size2i*)outSize img:(Mat*)img NS_SWIFT_NAME(generateImage(outSize:img:));
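//
// Usage sketch (illustrative only; `board` is assumed to be a Board built
// beforehand, e.g. with initWithObjPoints:dictionary:ids:, and the Size2i
// initializer shown here is an assumption about the bindings):
//
//   Mat *boardImage = [[Mat alloc] init];
//   Size2i *outSize = [[Size2i alloc] initWithWidth:600 height:500];
//   [board generateImage:outSize img:boardImage marginSize:10 borderBits:1];
//   // boardImage now contains the rendered board, ready to be printed
//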
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,151 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ml.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "DTrees.h"
// C++: enum Types (cv.ml.Boost.Types)
typedef NS_ENUM(int, Types) {
Boost_DISCRETE NS_SWIFT_NAME(DISCRETE) = 0,
Boost_REAL NS_SWIFT_NAME(REAL) = 1,
Boost_LOGIT NS_SWIFT_NAME(LOGIT) = 2,
Boost_GENTLE NS_SWIFT_NAME(GENTLE) = 3
};
NS_ASSUME_NONNULL_BEGIN
// C++: class Boost
/**
* Boosted tree classifier derived from DTrees
*
* @see REF: ml_intro_boost
*
* Member of `Ml`
*/
CV_EXPORTS @interface Boost : DTrees
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ml::Boost> nativePtrBoost;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ml::Boost>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ml::Boost>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::ml::Boost::getBoostType()
//
/**
* @see `-setBoostType:`
*/
- (int)getBoostType NS_SWIFT_NAME(getBoostType());
//
// void cv::ml::Boost::setBoostType(int val)
//
/**
* getBoostType @see `-getBoostType:`
*/
- (void)setBoostType:(int)val NS_SWIFT_NAME(setBoostType(val:));
//
// int cv::ml::Boost::getWeakCount()
//
/**
* @see `-setWeakCount:`
*/
- (int)getWeakCount NS_SWIFT_NAME(getWeakCount());
//
// void cv::ml::Boost::setWeakCount(int val)
//
/**
* getWeakCount @see `-getWeakCount:`
*/
- (void)setWeakCount:(int)val NS_SWIFT_NAME(setWeakCount(val:));
//
// double cv::ml::Boost::getWeightTrimRate()
//
/**
* @see `-setWeightTrimRate:`
*/
- (double)getWeightTrimRate NS_SWIFT_NAME(getWeightTrimRate());
//
// void cv::ml::Boost::setWeightTrimRate(double val)
//
/**
* getWeightTrimRate @see `-getWeightTrimRate:`
*/
- (void)setWeightTrimRate:(double)val NS_SWIFT_NAME(setWeightTrimRate(val:));
//
// static Ptr_Boost cv::ml::Boost::create()
//
/**
* Creates the empty model.
* Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model.
*/
+ (Boost*)create NS_SWIFT_NAME(create());
//
// static Ptr_Boost cv::ml::Boost::load(String filepath, String nodeName = String())
//
/**
* Loads and creates a serialized Boost from a file
*
 * Use Boost::save to serialize and store a Boost model to disk.
 * Load the Boost from this file again by calling this function with the path to the file.
* Optionally specify the node for the file containing the classifier
*
* @param filepath path to serialized Boost
* @param nodeName name of node containing the classifier
*/
+ (Boost*)load:(NSString*)filepath nodeName:(NSString*)nodeName NS_SWIFT_NAME(load(filepath:nodeName:));
/**
* Loads and creates a serialized Boost from a file
*
 * Use Boost::save to serialize and store a Boost model to disk.
 * Load the Boost from this file again by calling this function with the path to the file.
* Optionally specify the node for the file containing the classifier
*
* @param filepath path to serialized Boost
*/
+ (Boost*)load:(NSString*)filepath NS_SWIFT_NAME(load(filepath:));
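//
// Usage sketch (illustrative only; preparing training data via TrainData and
// calling the train method inherited from StatModel are not shown here):
//
//   Boost *boost = [Boost create];
//   [boost setBoostType:Boost_REAL];     // real AdaBoost
//   [boost setWeakCount:100];            // number of weak classifiers
//   [boost setWeightTrimRate:0.95];      // skip samples with very small weights
//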
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,114 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/xfeatures2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BoostDesc
/**
* Class implementing BoostDesc (Learning Image Descriptors with Boosting), described in
* CITE: Trzcinski13a and CITE: Trzcinski13b.
*
 * desc type of descriptor to use; BoostDesc::BINBOOST_256 is the default (256-bit descriptor).
 * Available types are: BoostDesc::BGM, BoostDesc::BGM_HARD, BoostDesc::BGM_BILINEAR, BoostDesc::LBGM,
 * BoostDesc::BINBOOST_64, BoostDesc::BINBOOST_128, BoostDesc::BINBOOST_256
 * use_orientation sample patterns using the keypoint orientation; enabled by default
 * scale_factor adjusts the sampling window of detected keypoints:
 * 6.25f is the default and fits the window ratio of KAZE and SURF detected keypoints
 * 6.75f should be the scale for SIFT detected keypoints
 * 5.00f should be the scale for AKAZE, MSD, AGAST, FAST and BRISK keypoints
 * 0.75f should be the scale for ORB keypoints
 * 1.50f was the default in the original implementation
 *
 * NOTE: BGM is the base descriptor, where each binary dimension is computed as the output of a single weak learner.
 * BGM_HARD and BGM_BILINEAR refer to the same BGM but use different types of gradient binning. BGM_HARD uses the
 * ASSIGN_HARD binning type, where the gradient is assigned to the nearest orientation bin. BGM_BILINEAR uses the
 * ASSIGN_BILINEAR binning type, where the gradient is assigned to the two neighbouring bins. BGM and all other modes
 * use the ASSIGN_SOFT binning type, where the gradient is assigned to the 8 nearest bins according to the cosine value
 * between the gradient angle and the bin center. LBGM (alias FP-Boost) is the floating point extension where each
 * dimension is computed as a linear combination of the weak learner responses. BINBOOST and its subvariants are the
 * binary extensions of LBGM, where each bit is computed as a thresholded linear combination of a set of weak learners.
 * BoostDesc header files (boostdesc_*.i) were exported from the original binaries with the export-boostdesc.py script
 * from the samples subfolder.
*
* Member of `Xfeatures2d`
*/
CV_EXPORTS @interface BoostDesc : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::xfeatures2d::BoostDesc> nativePtrBoostDesc;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::xfeatures2d::BoostDesc>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::xfeatures2d::BoostDesc>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_BoostDesc cv::xfeatures2d::BoostDesc::create(int desc = BoostDesc::BINBOOST_256, bool use_scale_orientation = true, float scale_factor = 6.25f)
//
+ (BoostDesc*)create:(int)desc use_scale_orientation:(BOOL)use_scale_orientation scale_factor:(float)scale_factor NS_SWIFT_NAME(create(desc:use_scale_orientation:scale_factor:));
+ (BoostDesc*)create:(int)desc use_scale_orientation:(BOOL)use_scale_orientation NS_SWIFT_NAME(create(desc:use_scale_orientation:));
+ (BoostDesc*)create:(int)desc NS_SWIFT_NAME(create(desc:));
+ (BoostDesc*)create NS_SWIFT_NAME(create());
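//
// Usage sketch (illustrative only, not part of the generated header). The keypoints are assumed
// to come from a separate detector (e.g. KAZE), and compute: is assumed from the inherited
// Feature2D wrapper:
//
//     BoostDesc* extractor = [BoostDesc create];
//     Mat* descriptors = [Mat new];
//     [extractor compute:grayImage keypoints:keypoints descriptors:descriptors];
//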
//
// String cv::xfeatures2d::BoostDesc::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
//
// void cv::xfeatures2d::BoostDesc::setUseScaleOrientation(bool use_scale_orientation)
//
- (void)setUseScaleOrientation:(BOOL)use_scale_orientation NS_SWIFT_NAME(setUseScaleOrientation(use_scale_orientation:));
//
// bool cv::xfeatures2d::BoostDesc::getUseScaleOrientation()
//
- (BOOL)getUseScaleOrientation NS_SWIFT_NAME(getUseScaleOrientation());
//
// void cv::xfeatures2d::BoostDesc::setScaleFactor(float scale_factor)
//
- (void)setScaleFactor:(float)scale_factor NS_SWIFT_NAME(setScaleFactor(scale_factor:));
//
// float cv::xfeatures2d::BoostDesc::getScaleFactor()
//
- (float)getScaleFactor NS_SWIFT_NAME(getScaleFactor());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,92 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/xfeatures2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class BriefDescriptorExtractor
/**
* Class for computing BRIEF descriptors described in CITE: calon2010.
*
* bytes length of the descriptor in bytes; valid values are 16, 32 (default) or 64.
* use_orientation sample patterns using the keypoint orientation, disabled by default.
*
* Member of `Xfeatures2d`
*/
CV_EXPORTS @interface BriefDescriptorExtractor : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::xfeatures2d::BriefDescriptorExtractor> nativePtrBriefDescriptorExtractor;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::xfeatures2d::BriefDescriptorExtractor>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::xfeatures2d::BriefDescriptorExtractor>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_BriefDescriptorExtractor cv::xfeatures2d::BriefDescriptorExtractor::create(int bytes = 32, bool use_orientation = false)
//
+ (BriefDescriptorExtractor*)create:(int)bytes use_orientation:(BOOL)use_orientation NS_SWIFT_NAME(create(bytes:use_orientation:));
+ (BriefDescriptorExtractor*)create:(int)bytes NS_SWIFT_NAME(create(bytes:));
+ (BriefDescriptorExtractor*)create NS_SWIFT_NAME(create());
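//
// Usage sketch (illustrative only, not part of the generated header). Keypoints are assumed to
// come from a separate detector, and compute: from the inherited Feature2D wrapper:
//
//     BriefDescriptorExtractor* brief = [BriefDescriptorExtractor create:32];
//     Mat* descriptors = [Mat new];
//     [brief compute:grayImage keypoints:keypoints descriptors:descriptors];
//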
//
// void cv::xfeatures2d::BriefDescriptorExtractor::setDescriptorSize(int bytes)
//
- (void)setDescriptorSize:(int)bytes NS_SWIFT_NAME(setDescriptorSize(bytes:));
//
// int cv::xfeatures2d::BriefDescriptorExtractor::getDescriptorSize()
//
- (int)getDescriptorSize NS_SWIFT_NAME(getDescriptorSize());
//
// void cv::xfeatures2d::BriefDescriptorExtractor::setUseOrientation(bool use_orientation)
//
- (void)setUseOrientation:(BOOL)use_orientation NS_SWIFT_NAME(setUseOrientation(use_orientation:));
//
// bool cv::xfeatures2d::BriefDescriptorExtractor::getUseOrientation()
//
- (BOOL)getUseOrientation NS_SWIFT_NAME(getUseOrientation());
//
// String cv::xfeatures2d::BriefDescriptorExtractor::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,89 @@
//
// ByteVector.h
//
// Created by Giles Payne on 2020/01/04.
//
#pragma once
#import <Foundation/Foundation.h>
#ifdef __cplusplus
#import <vector>
#endif
#import "CVObjcUtil.h"
NS_ASSUME_NONNULL_BEGIN
/**
* Utility class to wrap a `std::vector<char>`
*/
CV_EXPORTS @interface ByteVector : NSObject
#pragma mark - Constructors
/**
* Create ByteVector and initialize with the contents of an NSData object
* @param data NSData containing raw byte array
*/
-(instancetype)initWithData:(NSData*)data;
/**
* Create ByteVector and initialize with the contents of another ByteVector object
* @param src ByteVector containing data to copy
*/
-(instancetype)initWithVector:(ByteVector*)src;
#ifdef __OBJC__
/**
* Create ByteVector from raw C array
* @param array The raw C array
* @param elements The number of elements in the array
*/
-(instancetype)initWithNativeArray:(char*)array elements:(NSInteger)elements;
#endif
#ifdef __cplusplus
/**
* Create ByteVector from std::vector<char>
* @param src The std::vector<char> object to wrap
*/
-(instancetype)initWithStdVector:(std::vector<char>&)src;
+(instancetype)fromNative:(std::vector<char>&)src;
#endif
#pragma mark - Properties
/**
* Length of the vector
*/
@property(readonly) NSInteger length;
#ifdef __OBJC__
/**
* Raw C array
*/
@property(readonly) char* nativeArray;
#endif
#ifdef __cplusplus
/**
* The wrapped std::vector<char> object
*/
@property(readonly) std::vector<char>& nativeRef;
#endif
/**
* NSData object containing the raw byte data
*/
@property(readonly) NSData* data;
#pragma mark - Accessor method
/**
* Return array element
* @param index Index of the array element to return
*/
-(char)get:(NSInteger)index;
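//
// Usage sketch (illustrative only, not part of the generated header):
//
//     NSData* raw = [@"OpenCV" dataUsingEncoding:NSUTF8StringEncoding];
//     ByteVector* bytes = [[ByteVector alloc] initWithData:raw];
//     NSLog(@"length=%ld first=%d", (long)bytes.length, [bytes get:0]);
//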
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,103 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/imgproc.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
@class Size2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class CLAHE
/**
* Base class for Contrast Limited Adaptive Histogram Equalization.
*
* Member of `Imgproc`
*/
CV_EXPORTS @interface CLAHE : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::CLAHE> nativePtrCLAHE;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::CLAHE>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::CLAHE>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::CLAHE::apply(Mat src, Mat& dst)
//
/**
* Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
*
* @param src Source image of type CV_8UC1 or CV_16UC1.
* @param dst Destination image.
*/
- (void)apply:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(apply(src:dst:));
//
// void cv::CLAHE::setClipLimit(double clipLimit)
//
/**
* Sets threshold for contrast limiting.
*
* @param clipLimit threshold value.
*/
- (void)setClipLimit:(double)clipLimit NS_SWIFT_NAME(setClipLimit(clipLimit:));
//
// double cv::CLAHE::getClipLimit()
//
- (double)getClipLimit NS_SWIFT_NAME(getClipLimit());
//
// void cv::CLAHE::setTilesGridSize(Size tileGridSize)
//
/**
* Sets the size of the grid for histogram equalization. The input image will be divided into
* equally sized rectangular tiles.
*
* @param tileGridSize defines the number of tiles in row and column.
*/
- (void)setTilesGridSize:(Size2i*)tileGridSize NS_SWIFT_NAME(setTilesGridSize(tileGridSize:));
//
// Size cv::CLAHE::getTilesGridSize()
//
- (Size2i*)getTilesGridSize NS_SWIFT_NAME(getTilesGridSize());
//
// void cv::CLAHE::collectGarbage()
//
- (void)collectGarbage NS_SWIFT_NAME(collectGarbage());
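//
// Usage sketch (illustrative only, not part of the generated header). createCLAHE is assumed to
// be exposed by the generated Imgproc wrapper (mirroring cv::createCLAHE), and Size2i is assumed
// to offer an initWithWidth:height: initializer like the other generated value types:
//
//     CLAHE* clahe = [Imgproc createCLAHE];
//     [clahe setClipLimit:4.0];
//     [clahe setTilesGridSize:[[Size2i alloc] initWithWidth:8 height:8]];
//     [clahe apply:graySrc dst:equalized];
//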
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,85 @@
//
// CVObjcUtil.h
//
// Created by Giles Payne on 2020/01/02.
//
#pragma once
#ifndef CV_EXPORTS
#ifdef __cplusplus
#define CV_EXPORTS __attribute__ ((visibility ("default")))
#else
#define CV_EXPORTS
#endif
#endif
#ifdef __cplusplus
#import <vector>
template <typename CV, typename OBJC> std::vector<CV> objc2cv(NSArray<OBJC*>* _Nonnull array, CV& (* _Nonnull converter)(OBJC* _Nonnull)) {
std::vector<CV> ret;
for (OBJC* obj in array) {
ret.push_back(converter(obj));
}
return ret;
}
#define OBJC2CV(CV_CLASS, OBJC_CLASS, v, a) \
std::vector<CV_CLASS> v = objc2cv<CV_CLASS, OBJC_CLASS>(a, [](OBJC_CLASS* objc) -> CV_CLASS& { return objc.nativeRef; })
#define OBJC2CV_CUSTOM(CV_CLASS, OBJC_CLASS, v, a, CONV) \
std::vector<CV_CLASS> v; \
for (OBJC_CLASS* obj in a) { \
CV_CLASS tmp = CONV(obj); \
v.push_back(tmp); \
}
template <typename CV, typename OBJC> void cv2objc(std::vector<CV>& vector, NSMutableArray<OBJC*>* _Nonnull array, OBJC* _Nonnull (* _Nonnull converter)(CV&)) {
[array removeAllObjects];
for (size_t index = 0; index < vector.size(); index++) {
[array addObject:converter(vector[index])];
}
}
#define CV2OBJC(CV_CLASS, OBJC_CLASS, v, a) \
cv2objc<CV_CLASS, OBJC_CLASS>(v, a, [](CV_CLASS& cv) -> OBJC_CLASS* { return [OBJC_CLASS fromNative:cv]; })
#define CV2OBJC_CUSTOM(CV_CLASS, OBJC_CLASS, v, a, UNCONV) \
[a removeAllObjects]; \
for (size_t index = 0; index < v.size(); index++) { \
OBJC_CLASS *tmp = UNCONV(v[index]); \
[a addObject:tmp]; \
}
template <typename CV, typename OBJC> std::vector<std::vector<CV>> objc2cv2(NSArray<NSArray<OBJC*>*>* _Nonnull array, CV& (* _Nonnull converter)(OBJC* _Nonnull)) {
std::vector<std::vector<CV>> ret;
for (NSArray<OBJC*>* innerArray in array) {
std::vector<CV> innerVector;
for (OBJC* obj in innerArray) {
innerVector.push_back(converter(obj));
}
ret.push_back(innerVector);
}
return ret;
}
#define OBJC2CV2(CV_CLASS, OBJC_CLASS, v, a) \
std::vector<std::vector<CV_CLASS>> v = objc2cv2<CV_CLASS, OBJC_CLASS>(a, [](OBJC_CLASS* objc) -> CV_CLASS& { return objc.nativeRef; })
template <typename CV, typename OBJC> void cv2objc2(std::vector<std::vector<CV>>& vector, NSMutableArray<NSMutableArray<OBJC*>*>* _Nonnull array, OBJC* _Nonnull (* _Nonnull converter)(CV&)) {
[array removeAllObjects];
for (size_t index = 0; index < vector.size(); index++) {
std::vector<CV>& innerVector = vector[index];
NSMutableArray<OBJC*>* innerArray = [NSMutableArray arrayWithCapacity:innerVector.size()];
for (size_t index2 = 0; index2 < innerVector.size(); index2++) {
[innerArray addObject:converter(innerVector[index2])];
}
[array addObject:innerArray];
}
}
#define CV2OBJC2(CV_CLASS, OBJC_CLASS, v, a) \
cv2objc2<CV_CLASS, OBJC_CLASS>(v, a, [](CV_CLASS& cv) -> OBJC_CLASS* { return [OBJC_CLASS fromNative:cv]; })
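//
// Usage sketch (illustrative only; Objective-C++). Rect2i is assumed to expose a nativeRef
// property and a fromNative: factory, as the other generated value types do:
//
//     NSArray<Rect2i*>* objcRects = @[rectA, rectB];        // rectA/rectB: placeholder Rect2i objects
//     OBJC2CV(cv::Rect, Rect2i, cvRects, objcRects);        // cvRects is a std::vector<cv::Rect>
//     NSMutableArray<Rect2i*>* roundTrip = [NSMutableArray array];
//     CV2OBJC(cv::Rect, Rect2i, cvRects, roundTrip);        // converts back to Objective-C objects
//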
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,62 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/photo.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class CalibrateCRF
/**
* The base class for camera response calibration algorithms.
*
* Member of `Photo`
*/
CV_EXPORTS @interface CalibrateCRF : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::CalibrateCRF> nativePtrCalibrateCRF;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::CalibrateCRF>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::CalibrateCRF>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::CalibrateCRF::process(vector_Mat src, Mat& dst, Mat times)
//
/**
* Recovers inverse camera response.
*
* @param src vector of input images
* @param dst 256x1 matrix with inverse camera response function
* @param times vector of exposure time values for each image
*/
- (void)process:(NSArray<Mat*>*)src dst:(Mat*)dst times:(Mat*)times NS_SWIFT_NAME(process(src:dst:times:));
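//
// Usage sketch (illustrative only, not part of the generated header). createCalibrateDebevec is
// assumed to be exposed by the generated Photo wrapper (mirroring cv::createCalibrateDebevec);
// `exposures` is an NSArray<Mat*>* of aligned exposures and `times` a Mat of exposure times:
//
//     CalibrateDebevec* calibrate = [Photo createCalibrateDebevec];
//     Mat* response = [Mat new];
//     [calibrate process:exposures dst:response times:times];   // response: 256x1 inverse CRF
//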
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,89 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/photo.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "CalibrateCRF.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class CalibrateDebevec
/**
* The inverse camera response function is extracted for each brightness value by minimizing an objective
* function as a linear system. The objective function is constructed using pixel values at the same position
* in all images; an extra term is added to make the result smoother.
*
* For more information see CITE: DM97 .
*
* Member of `Photo`
*/
CV_EXPORTS @interface CalibrateDebevec : CalibrateCRF
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::CalibrateDebevec> nativePtrCalibrateDebevec;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::CalibrateDebevec>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::CalibrateDebevec>)nativePtr;
#endif
#pragma mark - Methods
//
// float cv::CalibrateDebevec::getLambda()
//
- (float)getLambda NS_SWIFT_NAME(getLambda());
//
// void cv::CalibrateDebevec::setLambda(float lambda)
//
- (void)setLambda:(float)lambda NS_SWIFT_NAME(setLambda(lambda:));
//
// int cv::CalibrateDebevec::getSamples()
//
- (int)getSamples NS_SWIFT_NAME(getSamples());
//
// void cv::CalibrateDebevec::setSamples(int samples)
//
- (void)setSamples:(int)samples NS_SWIFT_NAME(setSamples(samples:));
//
// bool cv::CalibrateDebevec::getRandom()
//
- (BOOL)getRandom NS_SWIFT_NAME(getRandom());
//
// void cv::CalibrateDebevec::setRandom(bool random)
//
- (void)setRandom:(BOOL)random NS_SWIFT_NAME(setRandom(random:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,82 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/photo.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "CalibrateCRF.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class CalibrateRobertson
/**
* The inverse camera response function is extracted for each brightness value by minimizing an objective
* function as a linear system. This algorithm uses all image pixels.
*
* For more information see CITE: RB99 .
*
* Member of `Photo`
*/
CV_EXPORTS @interface CalibrateRobertson : CalibrateCRF
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::CalibrateRobertson> nativePtrCalibrateRobertson;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::CalibrateRobertson>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::CalibrateRobertson>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::CalibrateRobertson::getMaxIter()
//
- (int)getMaxIter NS_SWIFT_NAME(getMaxIter());
//
// void cv::CalibrateRobertson::setMaxIter(int max_iter)
//
- (void)setMaxIter:(int)max_iter NS_SWIFT_NAME(setMaxIter(max_iter:));
//
// float cv::CalibrateRobertson::getThreshold()
//
- (float)getThreshold NS_SWIFT_NAME(getThreshold());
//
// void cv::CalibrateRobertson::setThreshold(float threshold)
//
- (void)setThreshold:(float)threshold NS_SWIFT_NAME(setThreshold(threshold:));
//
// Mat cv::CalibrateRobertson::getRadiance()
//
- (Mat*)getRadiance NS_SWIFT_NAME(getRadiance());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,454 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class DoubleVector;
@class IntVector;
@class Mat;
@class Rect2i;
@class Size2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class CascadeClassifier
/**
* Cascade classifier class for object detection.
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface CascadeClassifier : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::CascadeClassifier> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::CascadeClassifier>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::CascadeClassifier>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::CascadeClassifier::CascadeClassifier()
//
- (instancetype)init;
//
// cv::CascadeClassifier::CascadeClassifier(String filename)
//
/**
* Loads a classifier from a file.
*
* @param filename Name of the file from which the classifier is loaded.
*/
- (instancetype)initWithFilename:(NSString*)filename;
//
// bool cv::CascadeClassifier::empty()
//
/**
* Checks whether the classifier has been loaded.
*/
- (BOOL)empty NS_SWIFT_NAME(empty());
//
// bool cv::CascadeClassifier::load(String filename)
//
/**
* Loads a classifier from a file.
*
* @param filename Name of the file from which the classifier is loaded. The file may contain an old
* HAAR classifier trained by the haartraining application or a new cascade classifier trained by the
* traincascade application.
*/
- (BOOL)load:(NSString*)filename NS_SWIFT_NAME(load(filename:));
//
// bool cv::CascadeClassifier::read(FileNode node)
//
// Unknown type 'FileNode' (I), skipping the function
//
// void cv::CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size())
//
/**
* Detects objects of different sizes in the input image. The detected objects are returned as a list
* of rectangles.
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
* @param flags Parameter with the same meaning for an old cascade as in the function
* cvHaarDetectObjects. It is not used for a new cascade.
* @param minSize Minimum possible object size. Objects smaller than that are ignored.
* @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
*/
- (void)detectMultiScale:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize maxSize:(Size2i*)maxSize NS_SWIFT_NAME(detectMultiScale(image:objects:scaleFactor:minNeighbors:flags:minSize:maxSize:));
/**
* Detects objects of different sizes in the input image. The detected objects are returned as a list
* of rectangles.
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
* @param flags Parameter with the same meaning for an old cascade as in the function
* cvHaarDetectObjects. It is not used for a new cascade.
* @param minSize Minimum possible object size. Objects smaller than that are ignored.
*/
- (void)detectMultiScale:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize NS_SWIFT_NAME(detectMultiScale(image:objects:scaleFactor:minNeighbors:flags:minSize:));
/**
* Detects objects of different sizes in the input image. The detected objects are returned as a list
* of rectangles.
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
* @param flags Parameter with the same meaning for an old cascade as in the function
* cvHaarDetectObjects. It is not used for a new cascade.
*/
- (void)detectMultiScale:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags NS_SWIFT_NAME(detectMultiScale(image:objects:scaleFactor:minNeighbors:flags:));
/**
* Detects objects of different sizes in the input image. The detected objects are returned as a list
* of rectangles.
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
*/
- (void)detectMultiScale:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors NS_SWIFT_NAME(detectMultiScale(image:objects:scaleFactor:minNeighbors:));
/**
* Detects objects of different sizes in the input image. The detected objects are returned as a list
* of rectangles.
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
*/
- (void)detectMultiScale:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects scaleFactor:(double)scaleFactor NS_SWIFT_NAME(detectMultiScale(image:objects:scaleFactor:));
/**
* Detects objects of different sizes in the input image. The detected objects are returned as a list
* of rectangles.
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
*/
- (void)detectMultiScale:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects NS_SWIFT_NAME(detectMultiScale(image:objects:));
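//
// Usage sketch (illustrative only, not part of the generated header). The cascade file path is a
// placeholder and `grayImage` is assumed to be a CV_8U Mat:
//
//     CascadeClassifier* classifier = [[CascadeClassifier alloc] initWithFilename:@"haarcascade_frontalface_default.xml"];
//     NSMutableArray<Rect2i*>* faces = [NSMutableArray array];
//     [classifier detectMultiScale:grayImage objects:faces];
//     NSLog(@"Detected %lu objects", (unsigned long)faces.count);
//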
//
// void cv::CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, vector_int& numDetections, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size())
//
/**
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
* of detections is the number of neighboring positively classified rectangles that were joined
* together to form the object.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
* @param flags Parameter with the same meaning for an old cascade as in the function
* cvHaarDetectObjects. It is not used for a new cascade.
* @param minSize Minimum possible object size. Objects smaller than that are ignored.
* @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
*/
- (void)detectMultiScale2:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects numDetections:(IntVector*)numDetections scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize maxSize:(Size2i*)maxSize NS_SWIFT_NAME(detectMultiScale(image:objects:numDetections:scaleFactor:minNeighbors:flags:minSize:maxSize:));
/**
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
* of detections is the number of neighboring positively classified rectangles that were joined
* together to form the object.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
* @param flags Parameter with the same meaning for an old cascade as in the function
* cvHaarDetectObjects. It is not used for a new cascade.
* @param minSize Minimum possible object size. Objects smaller than that are ignored.
*/
- (void)detectMultiScale2:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects numDetections:(IntVector*)numDetections scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize NS_SWIFT_NAME(detectMultiScale(image:objects:numDetections:scaleFactor:minNeighbors:flags:minSize:));
/**
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
* of detections is the number of neighboring positively classified rectangles that were joined
* together to form the object.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
* @param flags Parameter with the same meaning for an old cascade as in the function
* cvHaarDetectObjects. It is not used for a new cascade.
*/
- (void)detectMultiScale2:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects numDetections:(IntVector*)numDetections scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags NS_SWIFT_NAME(detectMultiScale(image:objects:numDetections:scaleFactor:minNeighbors:flags:));
/**
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
* of detections is the number of neighboring positively classified rectangles that were joined
* together to form the object.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
* @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
* to retain it.
*/
- (void)detectMultiScale2:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects numDetections:(IntVector*)numDetections scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors NS_SWIFT_NAME(detectMultiScale(image:objects:numDetections:scaleFactor:minNeighbors:));
/**
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
* of detections is the number of neighboring positively classified rectangles that were joined
* together to form the object.
* @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
*/
- (void)detectMultiScale2:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects numDetections:(IntVector*)numDetections scaleFactor:(double)scaleFactor NS_SWIFT_NAME(detectMultiScale(image:objects:numDetections:scaleFactor:));
/**
*
* @param image Matrix of the type CV_8U containing an image where objects are detected.
* @param objects Vector of rectangles where each rectangle contains the detected object, the
* rectangles may be partially outside the original image.
* @param numDetections Vector of detection numbers for the corresponding objects. An object's number
* of detections is the number of neighboring positively classified rectangles that were joined
* together to form the object.
*/
- (void)detectMultiScale2:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects numDetections:(IntVector*)numDetections NS_SWIFT_NAME(detectMultiScale(image:objects:numDetections:));
//
// void cv::CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, vector_int& rejectLevels, vector_double& levelWeights, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size(), bool outputRejectLevels = false)
//
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize maxSize:(Size2i*)maxSize outputRejectLevels:(BOOL)outputRejectLevels NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:scaleFactor:minNeighbors:flags:minSize:maxSize:outputRejectLevels:));
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize maxSize:(Size2i*)maxSize NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:scaleFactor:minNeighbors:flags:minSize:maxSize:));
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags minSize:(Size2i*)minSize NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:scaleFactor:minNeighbors:flags:minSize:));
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors flags:(int)flags NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:scaleFactor:minNeighbors:flags:));
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights scaleFactor:(double)scaleFactor minNeighbors:(int)minNeighbors NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:scaleFactor:minNeighbors:));
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights scaleFactor:(double)scaleFactor NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:scaleFactor:));
/**
*
* This function allows you to retrieve the final stage decision certainty of classification.
* For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter.
* For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
* This value can then be used to separate strong from weaker classifications.
*
* A code sample on how to use it efficiently can be found below:
*
* Mat img;
* vector<double> weights;
* vector<int> levels;
* vector<Rect> detections;
* CascadeClassifier model("/path/to/your/model.xml");
* model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
* cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
*
*/
- (void)detectMultiScale3:(Mat*)image objects:(NSMutableArray<Rect2i*>*)objects rejectLevels:(IntVector*)rejectLevels levelWeights:(DoubleVector*)levelWeights NS_SWIFT_NAME(detectMultiScale(image:objects:rejectLevels:levelWeights:));
//
// bool cv::CascadeClassifier::isOldFormatCascade()
//
- (BOOL)isOldFormatCascade NS_SWIFT_NAME(isOldFormatCascade());
//
// Size cv::CascadeClassifier::getOriginalWindowSize()
//
- (Size2i*)getOriginalWindowSize NS_SWIFT_NAME(getOriginalWindowSize());
//
// int cv::CascadeClassifier::getFeatureType()
//
- (int)getFeatureType NS_SWIFT_NAME(getFeatureType());
//
// static bool cv::CascadeClassifier::convert(String oldcascade, String newcascade)
//
+ (BOOL)convert:(NSString*)oldcascade newcascade:(NSString*)newcascade NS_SWIFT_NAME(convert(oldcascade:newcascade:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,150 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/aruco_board.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Board.h"
@class Dictionary;
@class Mat;
@class Point3f;
@class Size2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class CharucoBoard
/**
* ChArUco board is a planar chessboard where the markers are placed inside the white squares of a chessboard.
*
* The benefit of ChArUco boards is that they provide both ArUco marker versatility and chessboard corner precision,
* which is important for calibration and pose estimation. The board image can be drawn using the generateImage() method.
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface CharucoBoard : Board
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::CharucoBoard> nativePtrCharucoBoard;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::CharucoBoard>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::CharucoBoard>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::CharucoBoard::CharucoBoard(Size size, float squareLength, float markerLength, Dictionary dictionary, Mat ids = Mat())
//
/**
* CharucoBoard constructor
*
* @param size number of chessboard squares in x and y directions
* @param squareLength chessboard square side length (normally in meters)
* @param markerLength marker side length (same unit as squareLength)
* @param dictionary dictionary of markers indicating the type of markers
* @param ids array of the marker ids used
* The first markers in the dictionary are used to fill the white chessboard squares.
*/
- (instancetype)initWithSize:(Size2i*)size squareLength:(float)squareLength markerLength:(float)markerLength dictionary:(Dictionary*)dictionary ids:(Mat*)ids;
/**
* CharucoBoard constructor
*
* @param size number of chessboard squares in x and y directions
* @param squareLength chessboard square side length (normally in meters)
* @param markerLength marker side length (same unit as squareLength)
* @param dictionary dictionary of markers indicating the type of markers
* The first markers in the dictionary are used to fill the white chessboard squares.
*/
- (instancetype)initWithSize:(Size2i*)size squareLength:(float)squareLength markerLength:(float)markerLength dictionary:(Dictionary*)dictionary;
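//
// Usage sketch (illustrative only, not part of the generated header). getPredefinedDictionary:,
// the DICT_6X6_250 constant and Size2i's initWithWidth:height: initializer are assumed from the
// generated Objdetect/core wrappers:
//
//     Dictionary* dict = [Objdetect getPredefinedDictionary:DICT_6X6_250];
//     Size2i* squares = [[Size2i alloc] initWithWidth:5 height:7];
//     CharucoBoard* board = [[CharucoBoard alloc] initWithSize:squares squareLength:0.04f markerLength:0.02f dictionary:dict];
//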
//
// void cv::aruco::CharucoBoard::setLegacyPattern(bool legacyPattern)
//
/**
* Set the legacy chessboard pattern.
*
* Legacy setting creates chessboard patterns starting with a white box in the upper left corner
* if there is an even row count of chessboard boxes, otherwise it starts with a black box.
* This setting ensures compatibility with patterns created with OpenCV versions prior to 4.6.0.
* See https://github.com/opencv/opencv/issues/23152.
*
* Default value: false.
*/
- (void)setLegacyPattern:(BOOL)legacyPattern NS_SWIFT_NAME(setLegacyPattern(legacyPattern:));
//
// bool cv::aruco::CharucoBoard::getLegacyPattern()
//
- (BOOL)getLegacyPattern NS_SWIFT_NAME(getLegacyPattern());
//
// Size cv::aruco::CharucoBoard::getChessboardSize()
//
- (Size2i*)getChessboardSize NS_SWIFT_NAME(getChessboardSize());
//
// float cv::aruco::CharucoBoard::getSquareLength()
//
- (float)getSquareLength NS_SWIFT_NAME(getSquareLength());
//
// float cv::aruco::CharucoBoard::getMarkerLength()
//
- (float)getMarkerLength NS_SWIFT_NAME(getMarkerLength());
//
// vector_Point3f cv::aruco::CharucoBoard::getChessboardCorners()
//
/**
* get CharucoBoard::chessboardCorners
*/
- (NSArray<Point3f*>*)getChessboardCorners NS_SWIFT_NAME(getChessboardCorners());
//
// bool cv::aruco::CharucoBoard::checkCharucoCornersCollinear(Mat charucoIds)
//
/**
* check whether the ChArUco markers are collinear
*
* @param charucoIds list of identifiers for each corner in charucoCorners per frame.
* @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not.
* solvePnP and calibration functions will fail if the corners are collinear (true).
*
* The number of ids in charucoIDs should be <= the number of chessboard corners in the board.
* This function checks whether the charuco corners lie on a straight line (returns true if so, false otherwise).
* Axis-parallel, as well as diagonal and other straight lines are detected. Degenerate cases:
* for a number of charucoIDs <= 2, the function returns true.
*/
- (BOOL)checkCharucoCornersCollinear:(Mat*)charucoIds NS_SWIFT_NAME(checkCharucoCornersCollinear(charucoIds:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,280 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/charuco_detector.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class CharucoBoard;
@class CharucoParameters;
@class DetectorParameters;
@class Mat;
@class RefineParameters;
NS_ASSUME_NONNULL_BEGIN
// C++: class CharucoDetector
/**
* The CharucoDetector module
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface CharucoDetector : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::CharucoDetector> nativePtrCharucoDetector;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::CharucoDetector>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::CharucoDetector>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::CharucoDetector::CharucoDetector(CharucoBoard board, CharucoParameters charucoParams = CharucoParameters(), DetectorParameters detectorParams = DetectorParameters(), RefineParameters refineParams = RefineParameters())
//
/**
* Basic CharucoDetector constructor
*
* @param board ChAruco board
* @param charucoParams charuco detection parameters
* @param detectorParams marker detection parameters
* @param refineParams marker refine detection parameters
*/
- (instancetype)initWithBoard:(CharucoBoard*)board charucoParams:(CharucoParameters*)charucoParams detectorParams:(DetectorParameters*)detectorParams refineParams:(RefineParameters*)refineParams;
/**
* Basic CharucoDetector constructor
*
* @param board ChAruco board
* @param charucoParams charuco detection parameters
* @param detectorParams marker detection parameters
*/
- (instancetype)initWithBoard:(CharucoBoard*)board charucoParams:(CharucoParameters*)charucoParams detectorParams:(DetectorParameters*)detectorParams;
/**
* Basic CharucoDetector constructor
*
* @param board ChAruco board
* @param charucoParams charuco detection parameters
*/
- (instancetype)initWithBoard:(CharucoBoard*)board charucoParams:(CharucoParameters*)charucoParams;
/**
* Basic CharucoDetector constructor
*
* @param board ChAruco board
*/
- (instancetype)initWithBoard:(CharucoBoard*)board;
//
// CharucoBoard cv::aruco::CharucoDetector::getBoard()
//
- (CharucoBoard*)getBoard NS_SWIFT_NAME(getBoard());
//
// void cv::aruco::CharucoDetector::setBoard(CharucoBoard board)
//
- (void)setBoard:(CharucoBoard*)board NS_SWIFT_NAME(setBoard(board:));
//
// CharucoParameters cv::aruco::CharucoDetector::getCharucoParameters()
//
- (CharucoParameters*)getCharucoParameters NS_SWIFT_NAME(getCharucoParameters());
//
// void cv::aruco::CharucoDetector::setCharucoParameters(CharucoParameters charucoParameters)
//
- (void)setCharucoParameters:(CharucoParameters*)charucoParameters NS_SWIFT_NAME(setCharucoParameters(charucoParameters:));
//
// DetectorParameters cv::aruco::CharucoDetector::getDetectorParameters()
//
- (DetectorParameters*)getDetectorParameters NS_SWIFT_NAME(getDetectorParameters());
//
// void cv::aruco::CharucoDetector::setDetectorParameters(DetectorParameters detectorParameters)
//
- (void)setDetectorParameters:(DetectorParameters*)detectorParameters NS_SWIFT_NAME(setDetectorParameters(detectorParameters:));
//
// RefineParameters cv::aruco::CharucoDetector::getRefineParameters()
//
- (RefineParameters*)getRefineParameters NS_SWIFT_NAME(getRefineParameters());
//
// void cv::aruco::CharucoDetector::setRefineParameters(RefineParameters refineParameters)
//
- (void)setRefineParameters:(RefineParameters*)refineParameters NS_SWIFT_NAME(setRefineParameters(refineParameters:));
//
// void cv::aruco::CharucoDetector::detectBoard(Mat image, Mat& charucoCorners, Mat& charucoIds, vector_Mat& markerCorners = vector_Mat(), Mat& markerIds = Mat())
//
/**
* Detect ArUco markers and interpolate the position of ChArUco board corners
* @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in the corners and ids parameters.
* @param charucoCorners interpolated chessboard corners.
* @param charucoIds interpolated chessboard corners identifiers.
* @param markerCorners vector of already detected markers corners. For each marker, its four
* corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* If markerCorners and markerIds are empty, the function detects ArUco markers and ids.
* @param markerIds list of identifiers for each marker in corners.
* If markerCorners and markerIds are empty, the function detects ArUco markers and ids.
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers.
*
* If markerCorners and markerIds are empty, detectMarkers() will run and detect ArUco markers and ids.
*
* If camera parameters are provided, the process is based on an approximated pose estimation, otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is also returned in charucoIds.
* @see `findChessboardCorners`
*/
- (void)detectBoard:(Mat*)image charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds markerCorners:(NSMutableArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds NS_SWIFT_NAME(detectBoard(image:charucoCorners:charucoIds:markerCorners:markerIds:));
/**
* Detect ArUco markers and interpolate the position of ChArUco board corners
* @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in the corners and ids parameters.
* @param charucoCorners interpolated chessboard corners.
* @param charucoIds interpolated chessboard corners identifiers.
* @param markerCorners vector of already detected markers corners. For each marker, its four
* corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
* dimensions of this array should be Nx4. The order of the corners should be clockwise.
* If markerCorners and markerIds are empty, the function detects ArUco markers and ids.
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers.
*
* If markerCorners and markerIds are empty, detectMarkers() will run and detect ArUco markers and ids.
*
* If camera parameters are provided, the process is based on an approximated pose estimation, otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is also returned in charucoIds.
* @see `findChessboardCorners`
*/
- (void)detectBoard:(Mat*)image charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds markerCorners:(NSMutableArray<Mat*>*)markerCorners NS_SWIFT_NAME(detectBoard(image:charucoCorners:charucoIds:markerCorners:));
/**
* Detect ArUco markers and interpolate the position of ChArUco board corners
* @param image input image necessary for corner refinement. Note that markers are not detected and
* should be sent in the corners and ids parameters.
* @param charucoCorners interpolated chessboard corners.
* @param charucoIds interpolated chessboard corners identifiers.
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers.
*
* If markerCorners and markerIds are empty, detectMarkers() will run and detect ArUco markers and ids.
*
* If camera parameters are provided, the process is based on an approximated pose estimation, otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is also returned in charucoIds.
* @see `findChessboardCorners`
*/
- (void)detectBoard:(Mat*)image charucoCorners:(Mat*)charucoCorners charucoIds:(Mat*)charucoIds NS_SWIFT_NAME(detectBoard(image:charucoCorners:charucoIds:));
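//
// Usage sketch (illustrative only, not part of the generated header). `board` is a previously
// constructed CharucoBoard and `image` a grayscale Mat of the scene:
//
//     CharucoDetector* detector = [[CharucoDetector alloc] initWithBoard:board];
//     Mat* charucoCorners = [Mat new];
//     Mat* charucoIds = [Mat new];
//     [detector detectBoard:image charucoCorners:charucoCorners charucoIds:charucoIds];
//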
//
// void cv::aruco::CharucoDetector::detectDiamonds(Mat image, vector_Mat& diamondCorners, Mat& diamondIds, vector_Mat& markerCorners = vector_Mat(), Mat& markerIds = Mat())
//
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
* is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
* format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> > ).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param markerCorners list of detected marker corners from detectMarkers function.
* If markerCorners and markerIds are empty, the function detects ArUco markers and ids.
* @param markerIds list of marker ids in markerCorners.
* If markerCorners and markerIds are empty, the function detects ArUco markers and ids.
*
* This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*/
- (void)detectDiamonds:(Mat*)image diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds markerCorners:(NSMutableArray<Mat*>*)markerCorners markerIds:(Mat*)markerIds NS_SWIFT_NAME(detectDiamonds(image:diamondCorners:diamondIds:markerCorners:markerIds:));
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
* is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
* format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> > ).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param markerCorners list of detected marker corners from detectMarkers function.
* If markerCorners and markerIds are empty, the function detects ArUco markers and ids.
*
* This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*/
- (void)detectDiamonds:(Mat*)image diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds markerCorners:(NSMutableArray<Mat*>*)markerCorners NS_SWIFT_NAME(detectDiamonds(image:diamondCorners:diamondIds:markerCorners:));
/**
* Detect ChArUco Diamond markers
*
* @param image input image necessary for corner subpixel.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
* is the same as in marker corners: top left, top right, bottom right and bottom left. Similar
* format to the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> > ).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
*
* This function detects Diamond markers from the previously detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*/
- (void)detectDiamonds:(Mat*)image diamondCorners:(NSMutableArray<Mat*>*)diamondCorners diamondIds:(Mat*)diamondIds NS_SWIFT_NAME(detectDiamonds(image:diamondCorners:diamondIds:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,80 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/charuco_detector.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class CharucoParameters
/**
* The CharucoParameters module
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface CharucoParameters : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::CharucoParameters> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::CharucoParameters>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::CharucoParameters>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::CharucoParameters::CharucoParameters()
//
- (instancetype)init;
//
// C++: Mat cv::aruco::CharucoParameters::cameraMatrix
//
@property Mat* cameraMatrix;
//
// C++: Mat cv::aruco::CharucoParameters::distCoeffs
//
@property Mat* distCoeffs;
//
// C++: int cv::aruco::CharucoParameters::minMarkers
//
@property int minMarkers;
//
// C++: bool cv::aruco::CharucoParameters::tryRefineMarkers
//
@property BOOL tryRefineMarkers;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,152 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/calib3d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Size2f;
// C++: enum GridType (cv.CirclesGridFinderParameters.GridType)
typedef NS_ENUM(int, GridType) {
CirclesGridFinderParameters_SYMMETRIC_GRID NS_SWIFT_NAME(SYMMETRIC_GRID) = 0,
CirclesGridFinderParameters_ASYMMETRIC_GRID NS_SWIFT_NAME(ASYMMETRIC_GRID) = 1
};
NS_ASSUME_NONNULL_BEGIN
// C++: class CirclesGridFinderParameters
/**
* The CirclesGridFinderParameters module
*
* Member of `Calib3d`
*/
CV_EXPORTS @interface CirclesGridFinderParameters : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::CirclesGridFinderParameters> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::CirclesGridFinderParameters>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::CirclesGridFinderParameters>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::CirclesGridFinderParameters::CirclesGridFinderParameters()
//
- (instancetype)init;
//
// C++: Size2f cv::CirclesGridFinderParameters::densityNeighborhoodSize
//
@property Size2f* densityNeighborhoodSize;
//
// C++: float cv::CirclesGridFinderParameters::minDensity
//
@property float minDensity;
//
// C++: int cv::CirclesGridFinderParameters::kmeansAttempts
//
@property int kmeansAttempts;
//
// C++: int cv::CirclesGridFinderParameters::minDistanceToAddKeypoint
//
@property int minDistanceToAddKeypoint;
//
// C++: int cv::CirclesGridFinderParameters::keypointScale
//
@property int keypointScale;
//
// C++: float cv::CirclesGridFinderParameters::minGraphConfidence
//
@property float minGraphConfidence;
//
// C++: float cv::CirclesGridFinderParameters::vertexGain
//
@property float vertexGain;
//
// C++: float cv::CirclesGridFinderParameters::vertexPenalty
//
@property float vertexPenalty;
//
// C++: float cv::CirclesGridFinderParameters::existingVertexGain
//
@property float existingVertexGain;
//
// C++: float cv::CirclesGridFinderParameters::edgeGain
//
@property float edgeGain;
//
// C++: float cv::CirclesGridFinderParameters::edgePenalty
//
@property float edgePenalty;
//
// C++: float cv::CirclesGridFinderParameters::convexHullFactor
//
@property float convexHullFactor;
//
// C++: float cv::CirclesGridFinderParameters::minRNGEdgeSwitchDist
//
@property float minRNGEdgeSwitchDist;
//
// C++: float cv::CirclesGridFinderParameters::squareSize
//
@property float squareSize;
//
// C++: float cv::CirclesGridFinderParameters::maxRectifiedDistance
//
@property float maxRectifiedDistance;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,116 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/dnn.hpp"
#import "opencv2/dnn/dnn.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Model.h"
@class Mat;
@class Net;
NS_ASSUME_NONNULL_BEGIN
// C++: class ClassificationModel
/**
* This class represents high-level API for classification models.
*
* ClassificationModel allows setting params for preprocessing the input image.
* ClassificationModel creates a net from a file with trained weights and config,
* sets preprocessing input, runs a forward pass and returns the top-1 prediction.
*
* Member of `Dnn`
*/
CV_EXPORTS @interface ClassificationModel : Model
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::dnn::ClassificationModel> nativePtrClassificationModel;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::dnn::ClassificationModel>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::dnn::ClassificationModel>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::dnn::ClassificationModel::ClassificationModel(String model, String config = "")
//
/**
* Create classification model from network represented in one of the supported formats.
* An order of @p model and @p config arguments does not matter.
* @param model Binary file containing trained weights.
* @param config Text file containing the network configuration.
*/
- (instancetype)initWithModel:(NSString*)model config:(NSString*)config;
/**
* Create classification model from network represented in one of the supported formats.
* An order of @p model and @p config arguments does not matter.
* @param model Binary file containing trained weights.
*/
- (instancetype)initWithModel:(NSString*)model;
//
// cv::dnn::ClassificationModel::ClassificationModel(Net network)
//
/**
* Create model from deep learning network.
* @param network Net object.
*/
- (instancetype)initWithNetwork:(Net*)network;
//
// ClassificationModel cv::dnn::ClassificationModel::setEnableSoftmaxPostProcessing(bool enable)
//
/**
* Set enable/disable softmax post processing option.
*
* If this option is true, softmax is applied after forward inference within the classify() function
* to convert the confidences to the range [0.0, 1.0].
* This function allows you to toggle this behavior.
* Set it to true when the model does not contain a softmax layer itself.
* @param enable Enable or disable softmax post processing within the classify() function.
*/
- (ClassificationModel*)setEnableSoftmaxPostProcessing:(BOOL)enable NS_SWIFT_NAME(setEnableSoftmaxPostProcessing(enable:));
//
// bool cv::dnn::ClassificationModel::getEnableSoftmaxPostProcessing()
//
/**
* Get enable/disable softmax post processing option.
*
* This option defaults to false, i.e. softmax post processing is not applied within the classify() function.
*/
- (BOOL)getEnableSoftmaxPostProcessing NS_SWIFT_NAME(getEnableSoftmaxPostProcessing());
//
// void cv::dnn::ClassificationModel::classify(Mat frame, int& classId, float& conf)
//
- (void)classify:(Mat*)frame classId:(int*)classId conf:(float*)conf NS_SWIFT_NAME(classify(frame:classId:conf:));
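//
// A minimal usage sketch. The model/config file names and the preloaded input Mat `frame` are
// hypothetical; they are not part of this header:
//
//   ClassificationModel *model = [[ClassificationModel alloc] initWithModel:@"net.caffemodel"
//                                                                    config:@"net.prototxt"];
//   [model setEnableSoftmaxPostProcessing:YES];   // only if the network itself lacks a softmax layer
//   int classId = 0;
//   float conf = 0.f;
//   [model classify:frame classId:&classId conf:&conf];
//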
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,58 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/img_hash.hpp"
#import "opencv2/img_hash/color_moment_hash.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "ImgHashBase.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class ColorMomentHash
/**
* Image hash based on color moments.
*
* See CITE: tang2012perceptual for details.
*
* Member of `Img_hash`
*/
CV_EXPORTS @interface ColorMomentHash : ImgHashBase
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::img_hash::ColorMomentHash> nativePtrColorMomentHash;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::img_hash::ColorMomentHash>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::img_hash::ColorMomentHash>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_ColorMomentHash cv::img_hash::ColorMomentHash::create()
//
+ (ColorMomentHash*)create NS_SWIFT_NAME(create());
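//
// Creation sketch. The hash itself is computed and compared through the methods inherited from
// ImgHashBase (declared in ImgHashBase.h, not in this header):
//
//   ColorMomentHash *hasher = [ColorMomentHash create];
//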
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,117 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/fourier_descriptors.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class ContourFitting
/**
* Class for ContourFitting algorithms.
* ContourFitting matches two contours `$$ z_a $$` and `$$ z_b $$` minimizing the distance
* `$$ d(z_a,z_b)=\sum (a_n - s b_n e^{j(n \alpha +\phi )})^2 $$` where `$$ a_n $$` and `$$ b_n $$` are the Fourier descriptors of `$$ z_a $$` and `$$ z_b $$`, s is a scaling factor, `$$ \phi $$` is the rotation angle and `$$ \alpha $$` is the starting point adjustment factor
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface ContourFitting : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::ContourFitting> nativePtrContourFitting;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::ContourFitting>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::ContourFitting>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::ContourFitting::estimateTransformation(Mat src, Mat dst, Mat& alphaPhiST, double& dist, bool fdContour = false)
//
/**
* Fit two closed curves using fourier descriptors. More details in CITE: PersoonFu1977 and CITE: BergerRaghunathan1998
*
* @param src Contour defining first shape.
* @param dst Contour defining second shape (Target).
* @param alphaPhiST : `$$ \alpha $$`=alphaPhiST(0,0), `$$ \phi $$`=alphaPhiST(0,1) (in radian), s=alphaPhiST(0,2), Tx=alphaPhiST(0,3), Ty=alphaPhiST(0,4) rotation center
* @param dist distance between src and dst after matching.
* @param fdContour if false, src and dst are contours; if true, src and dst are Fourier descriptors.
*/
- (void)estimateTransformation:(Mat*)src dst:(Mat*)dst alphaPhiST:(Mat*)alphaPhiST dist:(double*)dist fdContour:(BOOL)fdContour NS_SWIFT_NAME(estimateTransformation(src:dst:alphaPhiST:dist:fdContour:));
/**
* Fit two closed curves using fourier descriptors. More details in CITE: PersoonFu1977 and CITE: BergerRaghunathan1998
*
* @param src Contour defining first shape.
* @param dst Contour defining second shape (Target).
* @param alphaPhiST : `$$ \alpha $$`=alphaPhiST(0,0), `$$ \phi $$`=alphaPhiST(0,1) (in radian), s=alphaPhiST(0,2), Tx=alphaPhiST(0,3), Ty=alphaPhiST(0,4) rotation center
* @param dist distance between src and dst after matching.
*/
- (void)estimateTransformation:(Mat*)src dst:(Mat*)dst alphaPhiST:(Mat*)alphaPhiST dist:(double*)dist NS_SWIFT_NAME(estimateTransformation(src:dst:alphaPhiST:dist:));
//
// void cv::ximgproc::ContourFitting::setCtrSize(int n)
//
/**
* set number of Fourier descriptors used in estimateTransformation
*
* @param n number of Fourier descriptors equal to number of contour points after resampling.
*/
- (void)setCtrSize:(int)n NS_SWIFT_NAME(setCtrSize(n:));
//
// void cv::ximgproc::ContourFitting::setFDSize(int n)
//
/**
* set number of Fourier descriptors when estimateTransformation uses vector<Point>
*
* @param n number of fourier descriptors used for optimal curve matching.
*/
- (void)setFDSize:(int)n NS_SWIFT_NAME(setFDSize(n:));
//
// int cv::ximgproc::ContourFitting::getCtrSize()
//
/**
* @return number of fourier descriptors
*/
- (int)getCtrSize NS_SWIFT_NAME(getCtrSize());
//
// int cv::ximgproc::ContourFitting::getFDSize()
//
/**
* @return number of fourier descriptors used for optimal curve matching
*/
- (int)getFDSize NS_SWIFT_NAME(getFDSize());
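//
// A usage sketch. It assumes `fitting` was obtained from the Ximgproc factory for ContourFitting
// (not declared here) and that `srcContour`/`dstContour` are Mats holding the two closed curves;
// all three names are hypothetical:
//
//   [fitting setCtrSize:1024];
//   [fitting setFDSize:16];
//   Mat *alphaPhiST = [Mat new];
//   double dist = 0;
//   [fitting estimateTransformation:srcContour dst:dstContour alphaPhiST:alphaPhiST dist:&dist fdContour:NO];
//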
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,100 @@
//
// Converters.h
//
// Created by Giles Payne on 2020/03/03.
//
#pragma once
#ifdef __cplusplus
#import <opencv2/core.hpp>
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Mat.h"
#import "CvType.h"
#import "Point2i.h"
#import "Point2f.h"
#import "Point2d.h"
#import "Point3i.h"
#import "Point3f.h"
#import "Point3d.h"
#import "Rect2i.h"
#import "Rect2d.h"
#import "KeyPoint.h"
#import "DMatch.h"
#import "RotatedRect.h"
NS_ASSUME_NONNULL_BEGIN
CV_EXPORTS @interface Converters : NSObject
+ (Mat*)vector_Point_to_Mat:(NSArray<Point2i*>*)pts NS_SWIFT_NAME(vector_Point_to_Mat(_:));
+ (NSArray<Point2i*>*)Mat_to_vector_Point:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Point(_:));
+ (Mat*)vector_Point2f_to_Mat:(NSArray<Point2f*>*)pts NS_SWIFT_NAME(vector_Point2f_to_Mat(_:));
+ (NSArray<Point2f*>*)Mat_to_vector_Point2f:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Point2f(_:));
+ (Mat*)vector_Point2d_to_Mat:(NSArray<Point2d*>*)pts NS_SWIFT_NAME(vector_Point2d_to_Mat(_:));
+ (NSArray<Point2d*>*)Mat_to_vector_Point2d:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Point2d(_:));
+ (Mat*)vector_Point3i_to_Mat:(NSArray<Point3i*>*)pts NS_SWIFT_NAME(vector_Point3i_to_Mat(_:));
+ (NSArray<Point3i*>*)Mat_to_vector_Point3i:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Point3i(_:));
+ (Mat*)vector_Point3f_to_Mat:(NSArray<Point3f*>*)pts NS_SWIFT_NAME(vector_Point3f_to_Mat(_:));
+ (NSArray<Point3f*>*)Mat_to_vector_Point3f:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Point3f(_:));
+ (Mat*)vector_Point3d_to_Mat:(NSArray<Point3d*>*)pts NS_SWIFT_NAME(vector_Point3d_to_Mat(_:));
+ (NSArray<Point3d*>*)Mat_to_vector_Point3d:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Point3d(_:));
+ (Mat*)vector_float_to_Mat:(NSArray<NSNumber*>*)fs NS_SWIFT_NAME(vector_float_to_Mat(_:));
+ (NSArray<NSNumber*>*)Mat_to_vector_float:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_float(_:));
+ (Mat*)vector_uchar_to_Mat:(NSArray<NSNumber*>*)us NS_SWIFT_NAME(vector_uchar_to_Mat(_:));
+ (NSArray<NSNumber*>*)Mat_to_vector_uchar:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_uchar(_:));
+ (Mat*)vector_char_to_Mat:(NSArray<NSNumber*>*)cs NS_SWIFT_NAME(vector_char_to_Mat(_:));
+ (NSArray<NSNumber*>*)Mat_to_vector_char:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_char(_:));
+ (Mat*)vector_int_to_Mat:(NSArray<NSNumber*>*)is NS_SWIFT_NAME(vector_int_to_Mat(_:));
+ (NSArray<NSNumber*>*)Mat_to_vector_int:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_int(_:));
+ (Mat*)vector_Rect_to_Mat:(NSArray<Rect2i*>*)rs NS_SWIFT_NAME(vector_Rect_to_Mat(_:));
+ (NSArray<Rect2i*>*)Mat_to_vector_Rect:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Rect(_:));
+ (Mat*)vector_Rect2d_to_Mat:(NSArray<Rect2d*>*)rs NS_SWIFT_NAME(vector_Rect2d_to_Mat(_:));
+ (NSArray<Rect2d*>*)Mat_to_vector_Rect2d:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_Rect2d(_:));
+ (Mat*)vector_KeyPoint_to_Mat:(NSArray<KeyPoint*>*)kps NS_SWIFT_NAME(vector_KeyPoint_to_Mat(_:));
+ (NSArray<KeyPoint*>*)Mat_to_vector_KeyPoint:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_KeyPoint(_:));
+ (Mat*)vector_double_to_Mat:(NSArray<NSNumber*>*)ds NS_SWIFT_NAME(vector_double_to_Mat(_:));
+ (NSArray<NSNumber*>*)Mat_to_vector_double:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_double(_:));
+ (Mat*)vector_DMatch_to_Mat:(NSArray<DMatch*>*)matches NS_SWIFT_NAME(vector_DMatch_to_Mat(_:));
+ (NSArray<DMatch*>*)Mat_to_vector_DMatch:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_DMatch(_:));
+ (Mat*)vector_RotatedRect_to_Mat:(NSArray<RotatedRect*>*)rs NS_SWIFT_NAME(vector_RotatedRect_to_Mat(_:));
+ (NSArray<RotatedRect*>*)Mat_to_vector_RotatedRect:(Mat*)mat NS_SWIFT_NAME(Mat_to_vector_RotatedRect(_:));
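//
// Round-trip sketch (it assumes Point2f exposes an initWithX:y: initializer, as declared in Point2f.h):
//
//   NSArray<Point2f*> *pts = @[[[Point2f alloc] initWithX:1.0f y:2.0f],
//                              [[Point2f alloc] initWithX:3.0f y:4.0f]];
//   Mat *mat = [Converters vector_Point2f_to_Mat:pts];
//   NSArray<Point2f*> *back = [Converters Mat_to_vector_Point2f:mat];
//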
@end
NS_ASSUME_NONNULL_END

File diff suppressed because it is too large

View File

@ -0,0 +1,85 @@
//
// CvCamera2.h
//
// Created by Giles Payne on 2020/03/11.
//
#import <UIKit/UIKit.h>
#import <Accelerate/Accelerate.h>
#import <AVFoundation/AVFoundation.h>
#import <ImageIO/ImageIO.h>
#import "CVObjcUtil.h"
@class Mat;
@class CvAbstractCamera2;
CV_EXPORTS @interface CvAbstractCamera2 : NSObject
@property UIDeviceOrientation currentDeviceOrientation;
@property BOOL cameraAvailable;
@property (nonatomic, strong) AVCaptureSession* captureSession;
@property (nonatomic, strong) AVCaptureConnection* videoCaptureConnection;
@property (nonatomic, readonly) BOOL running;
@property (nonatomic, readonly) BOOL captureSessionLoaded;
@property (nonatomic, assign) int defaultFPS;
@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;
@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;
@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;
@property (nonatomic, assign) int imageWidth;
@property (nonatomic, assign) int imageHeight;
@property (nonatomic, strong) UIView* parentView;
- (void)start;
- (void)stop;
- (void)switchCameras;
- (id)initWithParentView:(UIView*)parent;
- (void)createCaptureOutput;
- (void)createVideoPreviewLayer;
- (void)updateOrientation;
- (void)lockFocus;
- (void)unlockFocus;
- (void)lockExposure;
- (void)unlockExposure;
- (void)lockBalance;
- (void)unlockBalance;
@end
///////////////////////////////// CvVideoCamera ///////////////////////////////////////////
@class CvVideoCamera2;
@protocol CvVideoCameraDelegate2 <NSObject>
- (void)processImage:(Mat*)image;
@end
CV_EXPORTS @interface CvVideoCamera2 : CvAbstractCamera2<AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, weak) id<CvVideoCameraDelegate2> delegate;
@property (nonatomic, assign) BOOL grayscaleMode;
@property (nonatomic, assign) BOOL recordVideo;
@property (nonatomic, assign) BOOL rotateVideo;
@property (nonatomic, strong) AVAssetWriterInput* recordAssetWriterInput;
@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
@property (nonatomic, strong) AVAssetWriter* recordAssetWriter;
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
- (void)layoutPreviewLayer;
- (void)saveVideo;
- (NSURL *)videoFileURL;
- (NSString *)videoFileString;
@end
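//
// A typical setup sketch for CvVideoCamera2 inside a view controller that adopts
// CvVideoCameraDelegate2 (names such as `self.view` belong to the host app, not to this header):
//
//   CvVideoCamera2 *camera = [[CvVideoCamera2 alloc] initWithParentView:self.view];
//   camera.defaultFPS = 30;
//   camera.grayscaleMode = NO;
//   camera.delegate = self;      // self implements -processImage: and filters each frame in place
//   [camera start];
//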
///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////
@class CvPhotoCamera2;
@protocol CvPhotoCameraDelegate2 <NSObject>
- (void)photoCamera:(CvPhotoCamera2*)photoCamera capturedImage:(UIImage*)image;
- (void)photoCameraCancel:(CvPhotoCamera2*)photoCamera;
@end
CV_EXPORTS @interface CvPhotoCamera2 : CvAbstractCamera2<AVCapturePhotoCaptureDelegate>
@property (nonatomic, weak) id<CvPhotoCameraDelegate2> delegate;
- (void)takePicture;
@end

View File

@ -0,0 +1,69 @@
//
// CvType.h
//
// Created by Giles Payne on 2019/10/13.
//
#ifdef __cplusplus
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
/**
* Utility functions for handling CvType values
*/
CV_EXPORTS @interface CvType : NSObject
#pragma mark - Type Utility functions
/**
* Create CvType value from depth and channel values
* @param depth Depth value. One of CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F or CV_64F
* @param channels Number of channels (from 1 to (CV_CN_MAX - 1))
*/
+ (int)makeType:(int)depth channels:(int)channels;
/**
* Get number of channels for type
* @param type Type value
*/
+ (int)channels:(int)type;
/**
* Get depth for type
* @param type Type value
*/
+ (int)depth:(int)type;
/**
* Get raw type size in bytes for type
* @param type Type value
*/
+ (int)rawTypeSize:(int)type;
/**
* Returns true if the raw type is an integer type (if depth is CV_8U, CV_8S, CV_16U, CV_16S or CV_32S)
* @param type Type value
*/
+ (BOOL)isInteger:(int)type;
/**
* Get element size in bytes for type
* @param type Type value
*/
+ (int)ELEM_SIZE:(int)type NS_SWIFT_NAME(elemSize(_:));
/**
* Get the string name for type
* @param type Type value
*/
+ (NSString*)typeToString:(int)type;
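//
// Example (a sketch; the literal depth 0 corresponds to CV_8U in OpenCV's type encoding):
//
//   int type = [CvType makeType:0 channels:3];      // CV_8UC3
//   NSLog(@"%@: %d channel(s), %d byte(s) per element",
//         [CvType typeToString:type], [CvType channels:type], [CvType ELEM_SIZE:type]);
//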
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,195 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/xfeatures2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
@class Mat;
// C++: enum NormalizationType (cv.xfeatures2d.DAISY.NormalizationType)
typedef NS_ENUM(int, NormalizationType) {
DAISY_NRM_NONE NS_SWIFT_NAME(NRM_NONE) = 100,
DAISY_NRM_PARTIAL NS_SWIFT_NAME(NRM_PARTIAL) = 101,
DAISY_NRM_FULL NS_SWIFT_NAME(NRM_FULL) = 102,
DAISY_NRM_SIFT NS_SWIFT_NAME(NRM_SIFT) = 103
};
NS_ASSUME_NONNULL_BEGIN
// C++: class DAISY
/**
* Class implementing DAISY descriptor, described in CITE: Tola10
*
* radius radius of the descriptor at the initial scale
* q_radius amount of radial range division quantity
* q_theta amount of angular range division quantity
* q_hist amount of gradient orientations range division quantity
* norm chooses the descriptor normalization type, where
* DAISY::NRM_NONE will not do any normalization (default),
* DAISY::NRM_PARTIAL means that histograms are normalized independently for L2 norm equal to 1.0,
* DAISY::NRM_FULL means that descriptors are normalized for L2 norm equal to 1.0,
* DAISY::NRM_SIFT means that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT
* H optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image
* interpolation switch to disable interpolation for speed improvement at minor quality loss
* use_orientation sample patterns using keypoints orientation, disabled by default.
*
* Member of `Xfeatures2d`
*/
CV_EXPORTS @interface DAISY : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::xfeatures2d::DAISY> nativePtrDAISY;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::xfeatures2d::DAISY>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::xfeatures2d::DAISY>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_DAISY cv::xfeatures2d::DAISY::create(float radius = 15, int q_radius = 3, int q_theta = 8, int q_hist = 8, NormalizationType norm = cv::xfeatures2d::DAISY::NRM_NONE, Mat H = Mat(), bool interpolation = true, bool use_orientation = false)
//
+ (DAISY*)create:(float)radius q_radius:(int)q_radius q_theta:(int)q_theta q_hist:(int)q_hist norm:(NormalizationType)norm H:(Mat*)H interpolation:(BOOL)interpolation use_orientation:(BOOL)use_orientation NS_SWIFT_NAME(create(radius:q_radius:q_theta:q_hist:norm:H:interpolation:use_orientation:));
+ (DAISY*)create:(float)radius q_radius:(int)q_radius q_theta:(int)q_theta q_hist:(int)q_hist norm:(NormalizationType)norm H:(Mat*)H interpolation:(BOOL)interpolation NS_SWIFT_NAME(create(radius:q_radius:q_theta:q_hist:norm:H:interpolation:));
+ (DAISY*)create:(float)radius q_radius:(int)q_radius q_theta:(int)q_theta q_hist:(int)q_hist norm:(NormalizationType)norm H:(Mat*)H NS_SWIFT_NAME(create(radius:q_radius:q_theta:q_hist:norm:H:));
+ (DAISY*)create:(float)radius q_radius:(int)q_radius q_theta:(int)q_theta q_hist:(int)q_hist norm:(NormalizationType)norm NS_SWIFT_NAME(create(radius:q_radius:q_theta:q_hist:norm:));
+ (DAISY*)create:(float)radius q_radius:(int)q_radius q_theta:(int)q_theta q_hist:(int)q_hist NS_SWIFT_NAME(create(radius:q_radius:q_theta:q_hist:));
+ (DAISY*)create:(float)radius q_radius:(int)q_radius q_theta:(int)q_theta NS_SWIFT_NAME(create(radius:q_radius:q_theta:));
+ (DAISY*)create:(float)radius q_radius:(int)q_radius NS_SWIFT_NAME(create(radius:q_radius:));
+ (DAISY*)create:(float)radius NS_SWIFT_NAME(create(radius:));
+ (DAISY*)create NS_SWIFT_NAME(create());
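//
// Creation sketch. DAISY is a descriptor extractor only, so keypoints usually come from another
// detector and descriptors are computed through the Feature2D interface declared in Feature2D.h:
//
//   DAISY *daisy = [DAISY create:15.0f q_radius:3 q_theta:8 q_hist:8 norm:DAISY_NRM_SIFT];
//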
//
// void cv::xfeatures2d::DAISY::setRadius(float radius)
//
- (void)setRadius:(float)radius NS_SWIFT_NAME(setRadius(radius:));
//
// float cv::xfeatures2d::DAISY::getRadius()
//
- (float)getRadius NS_SWIFT_NAME(getRadius());
//
// void cv::xfeatures2d::DAISY::setQRadius(int q_radius)
//
- (void)setQRadius:(int)q_radius NS_SWIFT_NAME(setQRadius(q_radius:));
//
// int cv::xfeatures2d::DAISY::getQRadius()
//
- (int)getQRadius NS_SWIFT_NAME(getQRadius());
//
// void cv::xfeatures2d::DAISY::setQTheta(int q_theta)
//
- (void)setQTheta:(int)q_theta NS_SWIFT_NAME(setQTheta(q_theta:));
//
// int cv::xfeatures2d::DAISY::getQTheta()
//
- (int)getQTheta NS_SWIFT_NAME(getQTheta());
//
// void cv::xfeatures2d::DAISY::setQHist(int q_hist)
//
- (void)setQHist:(int)q_hist NS_SWIFT_NAME(setQHist(q_hist:));
//
// int cv::xfeatures2d::DAISY::getQHist()
//
- (int)getQHist NS_SWIFT_NAME(getQHist());
//
// void cv::xfeatures2d::DAISY::setNorm(int norm)
//
- (void)setNorm:(int)norm NS_SWIFT_NAME(setNorm(norm:));
//
// int cv::xfeatures2d::DAISY::getNorm()
//
- (int)getNorm NS_SWIFT_NAME(getNorm());
//
// void cv::xfeatures2d::DAISY::setH(Mat H)
//
- (void)setH:(Mat*)H NS_SWIFT_NAME(setH(H:));
//
// Mat cv::xfeatures2d::DAISY::getH()
//
- (Mat*)getH NS_SWIFT_NAME(getH());
//
// void cv::xfeatures2d::DAISY::setInterpolation(bool interpolation)
//
- (void)setInterpolation:(BOOL)interpolation NS_SWIFT_NAME(setInterpolation(interpolation:));
//
// bool cv::xfeatures2d::DAISY::getInterpolation()
//
- (BOOL)getInterpolation NS_SWIFT_NAME(getInterpolation());
//
// void cv::xfeatures2d::DAISY::setUseOrientation(bool use_orientation)
//
- (void)setUseOrientation:(BOOL)use_orientation NS_SWIFT_NAME(setUseOrientation(use_orientation:));
//
// bool cv::xfeatures2d::DAISY::getUseOrientation()
//
- (BOOL)getUseOrientation NS_SWIFT_NAME(getUseOrientation());
//
// String cv::xfeatures2d::DAISY::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,286 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#import "opencv2/video/tracking.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "DenseOpticalFlow.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class DISOpticalFlow
/**
* DIS optical flow algorithm.
*
* This class implements the Dense Inverse Search (DIS) optical flow algorithm. More
* details about the algorithm can be found at CITE: Kroeger2016 . It includes three presets with preselected
* parameters to provide a reasonable trade-off between speed and quality. However, even the slowest preset is
* still relatively fast; use DeepFlow if you need better quality and don't care about speed.
*
* This implementation includes several additional features compared to the algorithm described in the paper,
* including spatial propagation of flow vectors (REF: getUseSpatialPropagation), as well as an option to
* utilize an initial flow approximation passed to REF: calc (which is, essentially, temporal propagation,
* if the previous frame's flow field is passed).
*
* Member of `Video`
*/
CV_EXPORTS @interface DISOpticalFlow : DenseOpticalFlow
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::DISOpticalFlow> nativePtrDISOpticalFlow;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::DISOpticalFlow>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::DISOpticalFlow>)nativePtr;
#endif
#pragma mark - Class Constants
@property (class, readonly) int PRESET_ULTRAFAST NS_SWIFT_NAME(PRESET_ULTRAFAST);
@property (class, readonly) int PRESET_FAST NS_SWIFT_NAME(PRESET_FAST);
@property (class, readonly) int PRESET_MEDIUM NS_SWIFT_NAME(PRESET_MEDIUM);
#pragma mark - Methods
//
// int cv::DISOpticalFlow::getFinestScale()
//
/**
* Finest level of the Gaussian pyramid on which the flow is computed (zero level
* corresponds to the original image resolution). The final flow is obtained by bilinear upscaling.
* @see `-setFinestScale:`
*/
- (int)getFinestScale NS_SWIFT_NAME(getFinestScale());
//
// void cv::DISOpticalFlow::setFinestScale(int val)
//
/**
* getFinestScale @see `-getFinestScale:`
*/
- (void)setFinestScale:(int)val NS_SWIFT_NAME(setFinestScale(val:));
//
// int cv::DISOpticalFlow::getPatchSize()
//
/**
* Size of an image patch for matching (in pixels). Normally, default 8x8 patches work well
* enough in most cases.
* @see `-setPatchSize:`
*/
- (int)getPatchSize NS_SWIFT_NAME(getPatchSize());
//
// void cv::DISOpticalFlow::setPatchSize(int val)
//
/**
* getPatchSize @see `-getPatchSize:`
*/
- (void)setPatchSize:(int)val NS_SWIFT_NAME(setPatchSize(val:));
//
// int cv::DISOpticalFlow::getPatchStride()
//
/**
* Stride between neighbor patches. Must be less than patch size. Lower values correspond
* to higher flow quality.
* @see `-setPatchStride:`
*/
- (int)getPatchStride NS_SWIFT_NAME(getPatchStride());
//
// void cv::DISOpticalFlow::setPatchStride(int val)
//
/**
* getPatchStride @see `-getPatchStride:`
*/
- (void)setPatchStride:(int)val NS_SWIFT_NAME(setPatchStride(val:));
//
// int cv::DISOpticalFlow::getGradientDescentIterations()
//
/**
* Maximum number of gradient descent iterations in the patch inverse search stage. Higher values
* may improve quality in some cases.
* @see `-setGradientDescentIterations:`
*/
- (int)getGradientDescentIterations NS_SWIFT_NAME(getGradientDescentIterations());
//
// void cv::DISOpticalFlow::setGradientDescentIterations(int val)
//
/**
* getGradientDescentIterations @see `-getGradientDescentIterations:`
*/
- (void)setGradientDescentIterations:(int)val NS_SWIFT_NAME(setGradientDescentIterations(val:));
//
// int cv::DISOpticalFlow::getVariationalRefinementIterations()
//
/**
* Number of fixed point iterations of variational refinement per scale. Set to zero to
* disable variational refinement completely. Higher values will typically result in more smooth and
* high-quality flow.
* @see `-setVariationalRefinementIterations:`
*/
- (int)getVariationalRefinementIterations NS_SWIFT_NAME(getVariationalRefinementIterations());
//
// void cv::DISOpticalFlow::setVariationalRefinementIterations(int val)
//
/**
* getVariationalRefinementIterations @see `-getVariationalRefinementIterations:`
*/
- (void)setVariationalRefinementIterations:(int)val NS_SWIFT_NAME(setVariationalRefinementIterations(val:));
//
// float cv::DISOpticalFlow::getVariationalRefinementAlpha()
//
/**
* Weight of the smoothness term
* @see `-setVariationalRefinementAlpha:`
*/
- (float)getVariationalRefinementAlpha NS_SWIFT_NAME(getVariationalRefinementAlpha());
//
// void cv::DISOpticalFlow::setVariationalRefinementAlpha(float val)
//
/**
* getVariationalRefinementAlpha @see `-getVariationalRefinementAlpha:`
*/
- (void)setVariationalRefinementAlpha:(float)val NS_SWIFT_NAME(setVariationalRefinementAlpha(val:));
//
// float cv::DISOpticalFlow::getVariationalRefinementDelta()
//
/**
* Weight of the color constancy term
* @see `-setVariationalRefinementDelta:`
*/
- (float)getVariationalRefinementDelta NS_SWIFT_NAME(getVariationalRefinementDelta());
//
// void cv::DISOpticalFlow::setVariationalRefinementDelta(float val)
//
/**
* getVariationalRefinementDelta @see `-getVariationalRefinementDelta:`
*/
- (void)setVariationalRefinementDelta:(float)val NS_SWIFT_NAME(setVariationalRefinementDelta(val:));
//
// float cv::DISOpticalFlow::getVariationalRefinementGamma()
//
/**
* Weight of the gradient constancy term
* @see `-setVariationalRefinementGamma:`
*/
- (float)getVariationalRefinementGamma NS_SWIFT_NAME(getVariationalRefinementGamma());
//
// void cv::DISOpticalFlow::setVariationalRefinementGamma(float val)
//
/**
* getVariationalRefinementGamma @see `-getVariationalRefinementGamma:`
*/
- (void)setVariationalRefinementGamma:(float)val NS_SWIFT_NAME(setVariationalRefinementGamma(val:));
//
// bool cv::DISOpticalFlow::getUseMeanNormalization()
//
/**
* Whether to use mean-normalization of patches when computing patch distance. It is turned on
* by default as it typically provides a noticeable quality boost because of increased robustness to
* illumination variations. Turn it off if you are certain that your sequence doesn't contain any changes
* in illumination.
* @see `-setUseMeanNormalization:`
*/
- (BOOL)getUseMeanNormalization NS_SWIFT_NAME(getUseMeanNormalization());
//
// void cv::DISOpticalFlow::setUseMeanNormalization(bool val)
//
/**
* getUseMeanNormalization @see `-getUseMeanNormalization:`
*/
- (void)setUseMeanNormalization:(BOOL)val NS_SWIFT_NAME(setUseMeanNormalization(val:));
//
// bool cv::DISOpticalFlow::getUseSpatialPropagation()
//
/**
* Whether to use spatial propagation of good optical flow vectors. This option is turned on by
* default, as it tends to work better on average and can sometimes help recover from major errors
* introduced by the coarse-to-fine scheme employed by the DIS optical flow algorithm. Turning this
* option off can make the output flow field a bit smoother, however.
* @see `-setUseSpatialPropagation:`
*/
- (BOOL)getUseSpatialPropagation NS_SWIFT_NAME(getUseSpatialPropagation());
//
// void cv::DISOpticalFlow::setUseSpatialPropagation(bool val)
//
/**
* getUseSpatialPropagation @see `-getUseSpatialPropagation:`
*/
- (void)setUseSpatialPropagation:(BOOL)val NS_SWIFT_NAME(setUseSpatialPropagation(val:));
//
// static Ptr_DISOpticalFlow cv::DISOpticalFlow::create(int preset = DISOpticalFlow::PRESET_FAST)
//
/**
* Creates an instance of DISOpticalFlow
*
* @param preset one of PRESET_ULTRAFAST, PRESET_FAST and PRESET_MEDIUM
*/
+ (DISOpticalFlow*)create:(int)preset NS_SWIFT_NAME(create(preset:));
/**
* Creates an instance of DISOpticalFlow
*
*/
+ (DISOpticalFlow*)create NS_SWIFT_NAME(create());
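//
// Usage sketch (it assumes `prevGray` and `nextGray` are 8-bit single-channel Mats of equal size;
// the names are hypothetical):
//
//   DISOpticalFlow *dis = [DISOpticalFlow create:DISOpticalFlow.PRESET_MEDIUM];
//   Mat *flow = [Mat new];                        // receives the CV_32FC2 flow field
//   [dis calc:prevGray I1:nextGray flow:flow];    // calc is inherited from DenseOpticalFlow
//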
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,84 @@
//
// DMatch.h
//
// Created by Giles Payne on 2019/12/25.
//
#pragma once
#ifdef __cplusplus
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
/**
* Structure for matching: query descriptor index, train descriptor index, train
* image index and distance between descriptors.
*/
CV_EXPORTS @interface DMatch : NSObject
/**
* Query descriptor index.
*/
@property int queryIdx;
/**
* Train descriptor index.
*/
@property int trainIdx;
/**
* Train image index.
*/
@property int imgIdx;
/**
* Distance
*/
@property float distance;
#ifdef __cplusplus
@property(readonly) cv::DMatch& nativeRef;
#endif
- (instancetype)init;
- (instancetype)initWithQueryIdx:(int)queryIdx trainIdx:(int)trainIdx distance:(float)distance;
- (instancetype)initWithQueryIdx:(int)queryIdx trainIdx:(int)trainIdx imgIdx:(int)imgIdx distance:(float)distance;
#ifdef __cplusplus
+ (instancetype)fromNative:(cv::DMatch&)dMatch;
#endif
/**
* Distance comparison
* @param it DMatch object to compare
*/
- (BOOL)lessThan:(DMatch*)it;
/**
* Clone object
*/
- (DMatch*)clone;
/**
* Compare for equality
* @param other Object to compare
*/
- (BOOL)isEqual:(nullable id)other;
/**
* Calculate hash for this object
*/
- (NSUInteger)hash;
/**
* Returns a string that describes the contents of the object
*/
- (NSString*)description;
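//
// Construction sketch (`otherMatch` is a hypothetical second DMatch instance):
//
//   DMatch *m = [[DMatch alloc] initWithQueryIdx:0 trainIdx:5 distance:12.5f];
//   BOOL closer = [m lessThan:otherMatch];   // comparison is by descriptor distance
//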
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,79 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/edge_filter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class DTFilter
/**
* Interface for realizations of Domain Transform filter.
*
* For more details about this filter see CITE: Gastal11 .
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface DTFilter : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::DTFilter> nativePtrDTFilter;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::DTFilter>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::DTFilter>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::DTFilter::filter(Mat src, Mat& dst, int dDepth = -1)
//
/**
* Produce domain transform filtering operation on source image.
*
* @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
*
* @param dst destination image.
*
* @param dDepth optional depth of the output image. dDepth can be set to -1, which will be equivalent
* to src.depth().
*/
- (void)filter:(Mat*)src dst:(Mat*)dst dDepth:(int)dDepth NS_SWIFT_NAME(filter(src:dst:dDepth:));
/**
* Produce domain transform filtering operation on source image.
*
* @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
*
* @param dst destination image.
*
*/
- (void)filter:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(filter(src:dst:));
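//
// Usage sketch. It assumes `dtf` was created by the Ximgproc factory for Domain Transform filters
// (not declared here) and that `src` is an 8-bit or 32-bit float Mat; both names are hypothetical:
//
//   Mat *dst = [Mat new];
//   [dtf filter:src dst:dst dDepth:-1];   // -1 keeps the source depth
//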
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,267 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ml.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "StatModel.h"
@class Mat;
// C++: enum DTreeFlags (cv.ml.DTrees.Flags)
typedef NS_ENUM(int, DTreeFlags) {
DTrees_PREDICT_AUTO NS_SWIFT_NAME(PREDICT_AUTO) = 0,
DTrees_PREDICT_SUM NS_SWIFT_NAME(PREDICT_SUM) = (1<<8),
DTrees_PREDICT_MAX_VOTE NS_SWIFT_NAME(PREDICT_MAX_VOTE) = (2<<8),
DTrees_PREDICT_MASK NS_SWIFT_NAME(PREDICT_MASK) = (3<<8)
};
NS_ASSUME_NONNULL_BEGIN
// C++: class DTrees
/**
* The class represents a single decision tree or a collection of decision trees.
*
* The current public interface of the class allows the user to train only a single decision tree; however,
* the class is capable of storing multiple decision trees and using them for prediction (by summing
* responses or using a voting scheme), and the classes derived from DTrees (such as RTrees and Boost)
* use this capability to implement decision tree ensembles.
*
* @see REF: ml_intro_trees
*
* Member of `Ml`
*/
CV_EXPORTS @interface DTrees : StatModel
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ml::DTrees> nativePtrDTrees;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ml::DTrees>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ml::DTrees>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::ml::DTrees::getMaxCategories()
//
/**
* @see `-setMaxCategories:`
*/
- (int)getMaxCategories NS_SWIFT_NAME(getMaxCategories());
//
// void cv::ml::DTrees::setMaxCategories(int val)
//
/**
* getMaxCategories @see `-getMaxCategories:`
*/
- (void)setMaxCategories:(int)val NS_SWIFT_NAME(setMaxCategories(val:));
//
// int cv::ml::DTrees::getMaxDepth()
//
/**
* @see `-setMaxDepth:`
*/
- (int)getMaxDepth NS_SWIFT_NAME(getMaxDepth());
//
// void cv::ml::DTrees::setMaxDepth(int val)
//
/**
* getMaxDepth @see `-getMaxDepth:`
*/
- (void)setMaxDepth:(int)val NS_SWIFT_NAME(setMaxDepth(val:));
//
// int cv::ml::DTrees::getMinSampleCount()
//
/**
* @see `-setMinSampleCount:`
*/
- (int)getMinSampleCount NS_SWIFT_NAME(getMinSampleCount());
//
// void cv::ml::DTrees::setMinSampleCount(int val)
//
/**
* getMinSampleCount @see `-getMinSampleCount:`
*/
- (void)setMinSampleCount:(int)val NS_SWIFT_NAME(setMinSampleCount(val:));
//
// int cv::ml::DTrees::getCVFolds()
//
/**
* @see `-setCVFolds:`
*/
- (int)getCVFolds NS_SWIFT_NAME(getCVFolds());
//
// void cv::ml::DTrees::setCVFolds(int val)
//
/**
* getCVFolds @see `-getCVFolds:`
*/
- (void)setCVFolds:(int)val NS_SWIFT_NAME(setCVFolds(val:));
//
// bool cv::ml::DTrees::getUseSurrogates()
//
/**
* @see `-setUseSurrogates:`
*/
- (BOOL)getUseSurrogates NS_SWIFT_NAME(getUseSurrogates());
//
// void cv::ml::DTrees::setUseSurrogates(bool val)
//
/**
* getUseSurrogates @see `-getUseSurrogates:`
*/
- (void)setUseSurrogates:(BOOL)val NS_SWIFT_NAME(setUseSurrogates(val:));
//
// bool cv::ml::DTrees::getUse1SERule()
//
/**
* @see `-setUse1SERule:`
*/
- (BOOL)getUse1SERule NS_SWIFT_NAME(getUse1SERule());
//
// void cv::ml::DTrees::setUse1SERule(bool val)
//
/**
* getUse1SERule @see `-getUse1SERule:`
*/
- (void)setUse1SERule:(BOOL)val NS_SWIFT_NAME(setUse1SERule(val:));
//
// bool cv::ml::DTrees::getTruncatePrunedTree()
//
/**
* @see `-setTruncatePrunedTree:`
*/
- (BOOL)getTruncatePrunedTree NS_SWIFT_NAME(getTruncatePrunedTree());
//
// void cv::ml::DTrees::setTruncatePrunedTree(bool val)
//
/**
* getTruncatePrunedTree @see `-getTruncatePrunedTree:`
*/
- (void)setTruncatePrunedTree:(BOOL)val NS_SWIFT_NAME(setTruncatePrunedTree(val:));
//
// float cv::ml::DTrees::getRegressionAccuracy()
//
/**
* @see `-setRegressionAccuracy:`
*/
- (float)getRegressionAccuracy NS_SWIFT_NAME(getRegressionAccuracy());
//
// void cv::ml::DTrees::setRegressionAccuracy(float val)
//
/**
* getRegressionAccuracy @see `-getRegressionAccuracy:`
*/
- (void)setRegressionAccuracy:(float)val NS_SWIFT_NAME(setRegressionAccuracy(val:));
//
// Mat cv::ml::DTrees::getPriors()
//
/**
* @see `-setPriors:`
*/
- (Mat*)getPriors NS_SWIFT_NAME(getPriors());
//
// void cv::ml::DTrees::setPriors(Mat val)
//
/**
* getPriors @see `-getPriors:`
*/
- (void)setPriors:(Mat*)val NS_SWIFT_NAME(setPriors(val:));
//
// static Ptr_DTrees cv::ml::DTrees::create()
//
/**
* Creates the empty model
*
* The static method creates an empty decision tree with the specified parameters. It should then be
* trained using the train method (see StatModel::train). Alternatively, you can load the model from
* file using Algorithm::load\<DTrees\>(filename).
*/
+ (DTrees*)create NS_SWIFT_NAME(create());
//
// static Ptr_DTrees cv::ml::DTrees::load(String filepath, String nodeName = String())
//
/**
* Loads and creates a serialized DTrees from a file
*
* Use DTrees::save to serialize and store a DTrees model to disk.
* Load the model from this file again by calling this function with the path to the file.
* Optionally specify the node for the file containing the classifier
*
* @param filepath path to serialized DTree
* @param nodeName name of node containing the classifier
*/
+ (DTrees*)load:(NSString*)filepath nodeName:(NSString*)nodeName NS_SWIFT_NAME(load(filepath:nodeName:));
/**
* Loads and creates a serialized DTrees from a file
*
* Use DTrees::save to serialize and store a DTrees model to disk.
* Load the model from this file again by calling this function with the path to the file.
* Optionally specify the node for the file containing the classifier
*
* @param filepath path to serialized DTree
*/
+ (DTrees*)load:(NSString*)filepath NS_SWIFT_NAME(load(filepath:));
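//
// Configuration sketch. Training itself uses the train method inherited from StatModel (see
// StatModel.h); the file path below is illustrative only:
//
//   DTrees *tree = [DTrees create];
//   [tree setMaxDepth:10];
//   [tree setMinSampleCount:2];
//   [tree setCVFolds:0];              // disable built-in cross-validation pruning
//   // Or restore a previously serialized model:
//   // DTrees *loaded = [DTrees load:@"dtree.yml"];
//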
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,72 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#import "opencv2/video/tracking.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class DenseOpticalFlow
/**
* Base class for dense optical flow algorithms
*
* Member of `Video`
*/
CV_EXPORTS @interface DenseOpticalFlow : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::DenseOpticalFlow> nativePtrDenseOpticalFlow;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::DenseOpticalFlow>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::DenseOpticalFlow>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::DenseOpticalFlow::calc(Mat I0, Mat I1, Mat& flow)
//
/**
* Calculates an optical flow.
*
* @param I0 first 8-bit single-channel input image.
* @param I1 second input image of the same size and the same type as prev.
* @param flow computed flow image that has the same size as prev and type CV_32FC2.
*/
- (void)calc:(Mat*)I0 I1:(Mat*)I1 flow:(Mat*)flow NS_SWIFT_NAME(calc(I0:I1:flow:));
//
// void cv::DenseOpticalFlow::collectGarbage()
//
/**
* Releases all inner buffers.
*/
- (void)collectGarbage NS_SWIFT_NAME(collectGarbage());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,482 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/features2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class DMatch;
@class Mat;
// C++: enum MatcherType (cv.DescriptorMatcher.MatcherType)
typedef NS_ENUM(int, MatcherType) {
DescriptorMatcher_FLANNBASED NS_SWIFT_NAME(FLANNBASED) = 1,
DescriptorMatcher_BRUTEFORCE NS_SWIFT_NAME(BRUTEFORCE) = 2,
DescriptorMatcher_BRUTEFORCE_L1 NS_SWIFT_NAME(BRUTEFORCE_L1) = 3,
DescriptorMatcher_BRUTEFORCE_HAMMING NS_SWIFT_NAME(BRUTEFORCE_HAMMING) = 4,
DescriptorMatcher_BRUTEFORCE_HAMMINGLUT NS_SWIFT_NAME(BRUTEFORCE_HAMMINGLUT) = 5,
DescriptorMatcher_BRUTEFORCE_SL2 NS_SWIFT_NAME(BRUTEFORCE_SL2) = 6
};
NS_ASSUME_NONNULL_BEGIN
// C++: class DescriptorMatcher
/**
* Abstract base class for matching keypoint descriptors.
*
* It has two groups of match methods: for matching descriptors of an image with another image or with
* an image set.
*
* Member of `Features2d`
*/
CV_EXPORTS @interface DescriptorMatcher : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::DescriptorMatcher> nativePtrDescriptorMatcher;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::DescriptorMatcher>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::DescriptorMatcher>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::DescriptorMatcher::add(vector_Mat descriptors)
//
/**
* Adds descriptors to train a CPU (trainDescCollection) or GPU (utrainDescCollection) descriptor
* collection.
*
* If the collection is not empty, the new descriptors are added to existing train descriptors.
*
* @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
* train image.
*/
- (void)add:(NSArray<Mat*>*)descriptors NS_SWIFT_NAME(add(descriptors:));
//
// vector_Mat cv::DescriptorMatcher::getTrainDescriptors()
//
/**
* Returns a constant link to the train descriptor collection trainDescCollection .
*/
- (NSArray<Mat*>*)getTrainDescriptors NS_SWIFT_NAME(getTrainDescriptors());
//
// void cv::DescriptorMatcher::clear()
//
/**
* Clears the train descriptor collections.
*/
- (void)clear NS_SWIFT_NAME(clear());
//
// bool cv::DescriptorMatcher::empty()
//
/**
* Returns true if there are no train descriptors in both collections.
*/
- (BOOL)empty NS_SWIFT_NAME(empty());
//
// bool cv::DescriptorMatcher::isMaskSupported()
//
/**
* Returns true if the descriptor matcher supports masking permissible matches.
*/
- (BOOL)isMaskSupported NS_SWIFT_NAME(isMaskSupported());
//
// void cv::DescriptorMatcher::train()
//
/**
* Trains a descriptor matcher
*
* Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
* train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)
* have an empty implementation of this method. Other matchers really train their inner structures (for
* example, FlannBasedMatcher trains flann::Index ).
*/
- (void)train NS_SWIFT_NAME(train());
//
// void cv::DescriptorMatcher::match(Mat queryDescriptors, Mat trainDescriptors, vector_DMatch& matches, Mat mask = Mat())
//
/**
* Finds the best match for each descriptor from a query set.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
* descriptor. So, matches size may be smaller than the query descriptors count.
* @param mask Mask specifying permissible matches between an input query and train matrices of
* descriptors.
*
* In the first variant of this method, the train descriptors are passed as an input argument. In the
* second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is
* used. Optional mask (or masks) can be passed to specify which query and training descriptors can be
* matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
* mask.at\<uchar\>(i,j) is non-zero.
*/
- (void)match:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<DMatch*>*)matches mask:(Mat*)mask NS_SWIFT_NAME(match(queryDescriptors:trainDescriptors:matches:mask:));
/**
* Finds the best match for each descriptor from a query set.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
* descriptor. So, matches size may be smaller than the query descriptors count.
*
* In the first variant of this method, the train descriptors are passed as an input argument. In the
* second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is
* used. Optional mask (or masks) can be passed to specify which query and training descriptors can be
* matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
* mask.at\<uchar\>(i,j) is non-zero.
*/
- (void)match:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<DMatch*>*)matches NS_SWIFT_NAME(match(queryDescriptors:trainDescriptors:matches:));
//
// void cv::DescriptorMatcher::knnMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false)
//
/**
* Finds the k best matches for each descriptor from a query set.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param mask Mask specifying permissible matches between an input query and train matrices of
* descriptors.
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
* less than k possible matches in total.
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
* the matches vector does not contain matches for fully masked-out query descriptors.
*
* These extended variants of DescriptorMatcher::match methods find several best matches for each query
* descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
* for the details about query and train descriptors.
*/
- (void)knnMatch:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches k:(int)k mask:(Mat*)mask compactResult:(BOOL)compactResult NS_SWIFT_NAME(knnMatch(queryDescriptors:trainDescriptors:matches:k:mask:compactResult:));
/**
* Finds the k best matches for each descriptor from a query set.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param mask Mask specifying permissible matches between an input query and train matrices of
* descriptors.
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
* less than k possible matches in total.
*
* These extended variants of DescriptorMatcher::match methods find several best matches for each query
* descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
* for the details about query and train descriptors.
*/
- (void)knnMatch:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches k:(int)k mask:(Mat*)mask NS_SWIFT_NAME(knnMatch(queryDescriptors:trainDescriptors:matches:k:mask:));
/**
* Finds the k best matches for each descriptor from a query set.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
* less than k possible matches in total.
*
* These extended variants of DescriptorMatcher::match methods find several best matches for each query
* descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
* for the details about query and train descriptors.
*/
- (void)knnMatch:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches k:(int)k NS_SWIFT_NAME(knnMatch(queryDescriptors:trainDescriptors:matches:k:));
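//
// Ratio-test sketch on top of knnMatch. It assumes `matcher` is a concrete DescriptorMatcher and
// `queryDesc`/`trainDesc` are descriptor Mats; all three names are hypothetical:
//
//   NSMutableArray<NSMutableArray<DMatch*>*> *knn = [NSMutableArray new];
//   [matcher knnMatch:queryDesc trainDescriptors:trainDesc matches:knn k:2];
//   NSMutableArray<DMatch*> *good = [NSMutableArray new];
//   for (NSArray<DMatch*> *pair in knn) {
//       if (pair.count == 2 && pair[0].distance < 0.75f * pair[1].distance) {
//           [good addObject:pair[0]];              // keep matches that pass Lowe's ratio test
//       }
//   }
//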
//
// void cv::DescriptorMatcher::radiusMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false)
//
/**
* For each query descriptor, finds the training descriptors not farther than the specified distance.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param matches Found matches.
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
* the matches vector does not contain matches for fully masked-out query descriptors.
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
* in Pixels)!
* @param mask Mask specifying permissible matches between an input query and train matrices of
* descriptors.
*
* For each query descriptor, the methods find such training descriptors that the distance between the
* query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
* returned in the distance increasing order.
*/
- (void)radiusMatch:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches maxDistance:(float)maxDistance mask:(Mat*)mask compactResult:(BOOL)compactResult NS_SWIFT_NAME(radiusMatch(queryDescriptors:trainDescriptors:matches:maxDistance:mask:compactResult:));
/**
* For each query descriptor, finds the training descriptors not farther than the specified distance.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param matches Found matches.
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
* in Pixels)!
* @param mask Mask specifying permissible matches between an input query and train matrices of
* descriptors.
*
* For each query descriptor, the methods find such training descriptors that the distance between the
* query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
* returned in the distance increasing order.
*/
- (void)radiusMatch:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches maxDistance:(float)maxDistance mask:(Mat*)mask NS_SWIFT_NAME(radiusMatch(queryDescriptors:trainDescriptors:matches:maxDistance:mask:));
/**
* For each query descriptor, finds the training descriptors not farther than the specified distance.
*
* @param queryDescriptors Query set of descriptors.
* @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
* collection stored in the class object.
* @param matches Found matches.
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
* in Pixels)!
*
* For each query descriptor, the methods find such training descriptors that the distance between the
* query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
* returned in the distance increasing order.
*/
- (void)radiusMatch:(Mat*)queryDescriptors trainDescriptors:(Mat*)trainDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches maxDistance:(float)maxDistance NS_SWIFT_NAME(radiusMatch(queryDescriptors:trainDescriptors:matches:maxDistance:));
//
// void cv::DescriptorMatcher::match(Mat queryDescriptors, vector_DMatch& matches, vector_Mat masks = vector_Mat())
//
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
* descriptor. So, matches size may be smaller than the query descriptors count.
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
*/
- (void)match:(Mat*)queryDescriptors matches:(NSMutableArray<DMatch*>*)matches masks:(NSArray<Mat*>*)masks NS_SWIFT_NAME(match(queryDescriptors:matches:masks:));
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
* descriptor. So, matches size may be smaller than the query descriptors count.
*/
- (void)match:(Mat*)queryDescriptors matches:(NSMutableArray<DMatch*>*)matches NS_SWIFT_NAME(match(queryDescriptors:matches:));
//
// void cv::DescriptorMatcher::knnMatch(Mat queryDescriptors, vector_vector_DMatch& matches, int k, vector_Mat masks = vector_Mat(), bool compactResult = false)
//
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
* less than k possible matches in total.
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
* the matches vector does not contain matches for fully masked-out query descriptors.
*/
- (void)knnMatch:(Mat*)queryDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches k:(int)k masks:(NSArray<Mat*>*)masks compactResult:(BOOL)compactResult NS_SWIFT_NAME(knnMatch(queryDescriptors:matches:k:masks:compactResult:));
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
* less than k possible matches in total.
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
*/
- (void)knnMatch:(Mat*)queryDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches k:(int)k masks:(NSArray<Mat*>*)masks NS_SWIFT_NAME(knnMatch(queryDescriptors:matches:k:masks:));
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
* @param k Count of best matches found per each query descriptor or less if a query descriptor has
* less than k possible matches in total.
*/
- (void)knnMatch:(Mat*)queryDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches k:(int)k NS_SWIFT_NAME(knnMatch(queryDescriptors:matches:k:));
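//
// Usage sketch (illustrative, not part of the generated API): a Lowe-style ratio
// test over knnMatch results. It assumes `queryDesc` and `trainDesc` are descriptor
// Mats computed elsewhere, that the two-Mat overload
// `knnMatch:trainDescriptors:matches:k:` is declared earlier in this header, and
// that DMatch exposes a `distance` property.
//
//   NSMutableArray<NSMutableArray<DMatch*>*> *knn = [NSMutableArray array];
//   [matcher knnMatch:queryDesc trainDescriptors:trainDesc matches:knn k:2];
//   NSMutableArray<DMatch*> *good = [NSMutableArray array];
//   for (NSArray<DMatch*> *pair in knn) {
//       // keep a match only when it is clearly better than the runner-up
//       if (pair.count == 2 && pair[0].distance < 0.75f * pair[1].distance) {
//           [good addObject:pair[0]];
//       }
//   }
//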
//
// void cv::DescriptorMatcher::radiusMatch(Mat queryDescriptors, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector_Mat(), bool compactResult = false)
//
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Found matches.
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
* in Pixels)!
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
* @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
* false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
* the matches vector does not contain matches for fully masked-out query descriptors.
*/
- (void)radiusMatch:(Mat*)queryDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches maxDistance:(float)maxDistance masks:(NSArray<Mat*>*)masks compactResult:(BOOL)compactResult NS_SWIFT_NAME(radiusMatch(queryDescriptors:matches:maxDistance:masks:compactResult:));
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Found matches.
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
* in Pixels)!
* @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
* descriptors and stored train descriptors from the i-th image trainDescCollection[i].
*/
- (void)radiusMatch:(Mat*)queryDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches maxDistance:(float)maxDistance masks:(NSArray<Mat*>*)masks NS_SWIFT_NAME(radiusMatch(queryDescriptors:matches:maxDistance:masks:));
/**
*
* @param queryDescriptors Query set of descriptors.
* @param matches Found matches.
* @param maxDistance Threshold for the distance between matched descriptors. Distance means here
* metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
* in Pixels)!
*/
- (void)radiusMatch:(Mat*)queryDescriptors matches:(NSMutableArray<NSMutableArray<DMatch*>*>*)matches maxDistance:(float)maxDistance NS_SWIFT_NAME(radiusMatch(queryDescriptors:matches:maxDistance:));
//
// void cv::DescriptorMatcher::write(String fileName)
//
- (void)write:(NSString*)fileName NS_SWIFT_NAME(write(fileName:));
//
// void cv::DescriptorMatcher::read(String fileName)
//
- (void)read:(NSString*)fileName NS_SWIFT_NAME(read(fileName:));
//
// void cv::DescriptorMatcher::read(FileNode arg1)
//
// Unknown type 'FileNode' (I), skipping the function
//
// Ptr_DescriptorMatcher cv::DescriptorMatcher::clone(bool emptyTrainData = false)
//
/**
* Clones the matcher.
*
* @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
* that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
* object copy with the current parameters but with empty train data.
*/
- (DescriptorMatcher*)clone:(BOOL)emptyTrainData NS_SWIFT_NAME(clone(emptyTrainData:));
/**
* Clones the matcher.
*
 * Since emptyTrainData defaults to false, the returned copy includes both the parameters and the train data.
*/
- (DescriptorMatcher*)clone NS_SWIFT_NAME(clone());
//
// static Ptr_DescriptorMatcher cv::DescriptorMatcher::create(String descriptorMatcherType)
//
/**
* Creates a descriptor matcher of a given type with the default parameters (using default
* constructor).
*
* @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are
* supported:
* - `BruteForce` (it uses L2 )
* - `BruteForce-L1`
* - `BruteForce-Hamming`
* - `BruteForce-Hamming(2)`
* - `FlannBased`
*/
+ (DescriptorMatcher*)create2:(NSString*)descriptorMatcherType NS_SWIFT_NAME(create(descriptorMatcherType:));
//
// static Ptr_DescriptorMatcher cv::DescriptorMatcher::create(DescriptorMatcher_MatcherType matcherType)
//
+ (DescriptorMatcher*)create:(MatcherType)matcherType NS_SWIFT_NAME(create(matcherType:));
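//
// Usage sketch (illustrative): matchers are obtained through the static factories
// rather than alloc/init; the string form mirrors the C++ names listed above.
// `queryDesc` and `trainDesc` are assumed to be binary descriptor Mats computed elsewhere.
//
//   DescriptorMatcher *bfHamming = [DescriptorMatcher create2:@"BruteForce-Hamming"];
//   NSMutableArray<NSMutableArray<DMatch*>*> *near = [NSMutableArray array];
//   // all train descriptors within a Hamming radius of 40 of each query descriptor
//   [bfHamming radiusMatch:queryDesc trainDescriptors:trainDesc matches:near maxDistance:40.0f];
//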
//
// void cv::DescriptorMatcher::write(FileStorage fs, String name)
//
// Unknown type 'FileStorage' (I), skipping the function
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,141 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/dnn.hpp"
#import "opencv2/dnn/dnn.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Model.h"
@class FloatVector;
@class IntVector;
@class Mat;
@class Net;
@class Rect2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class DetectionModel
/**
* This class represents high-level API for object detection networks.
*
 * DetectionModel allows setting params for preprocessing the input image.
 * DetectionModel creates a net from a file with trained weights and config,
 * sets the preprocessing input, runs a forward pass and returns the resulting detections.
 * For DetectionModel, SSD, Faster R-CNN and YOLO topologies are supported.
*
* Member of `Dnn`
*/
CV_EXPORTS @interface DetectionModel : Model
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::dnn::DetectionModel> nativePtrDetectionModel;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::dnn::DetectionModel>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::dnn::DetectionModel>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::dnn::DetectionModel::DetectionModel(String model, String config = "")
//
/**
 * Creates a detection model from a network represented in one of the supported formats.
 * The order of the @p model and @p config arguments does not matter.
 * @param model Binary file containing trained weights.
 * @param config Text file containing the network configuration.
*/
- (instancetype)initWithModel:(NSString*)model config:(NSString*)config;
/**
 * Creates a detection model from a network represented in one of the supported formats.
 * @param model Binary file containing trained weights.
*/
- (instancetype)initWithModel:(NSString*)model;
//
// cv::dnn::DetectionModel::DetectionModel(Net network)
//
/**
* Create model from deep learning network.
* @param network Net object.
*/
- (instancetype)initWithNetwork:(Net*)network;
//
// DetectionModel cv::dnn::DetectionModel::setNmsAcrossClasses(bool value)
//
/**
* nmsAcrossClasses defaults to false,
 * such that when non-maximum suppression is used during the detect() function, it is applied per-class.
* This function allows you to toggle this behaviour.
* @param value The new value for nmsAcrossClasses
*/
- (DetectionModel*)setNmsAcrossClasses:(BOOL)value NS_SWIFT_NAME(setNmsAcrossClasses(value:));
//
// bool cv::dnn::DetectionModel::getNmsAcrossClasses()
//
/**
* Getter for nmsAcrossClasses. This variable defaults to false,
 * such that when non-maximum suppression is used during the detect() function, it is applied only per-class.
*/
- (BOOL)getNmsAcrossClasses NS_SWIFT_NAME(getNmsAcrossClasses());
//
// void cv::dnn::DetectionModel::detect(Mat frame, vector_int& classIds, vector_float& confidences, vector_Rect& boxes, float confThreshold = 0.5f, float nmsThreshold = 0.0f)
//
/**
 * Given the input @p frame, creates an input blob, runs the net and returns the resulting detections.
* @param classIds Class indexes in result detection.
* @param confidences A set of corresponding confidences.
* @param boxes A set of bounding boxes.
* @param confThreshold A threshold used to filter boxes by confidences.
* @param nmsThreshold A threshold used in non maximum suppression.
*/
- (void)detect:(Mat*)frame classIds:(IntVector*)classIds confidences:(FloatVector*)confidences boxes:(NSMutableArray<Rect2i*>*)boxes confThreshold:(float)confThreshold nmsThreshold:(float)nmsThreshold NS_SWIFT_NAME(detect(frame:classIds:confidences:boxes:confThreshold:nmsThreshold:));
/**
 * Given the input @p frame, creates an input blob, runs the net and returns the resulting detections.
* @param classIds Class indexes in result detection.
* @param confidences A set of corresponding confidences.
* @param boxes A set of bounding boxes.
* @param confThreshold A threshold used to filter boxes by confidences.
*/
- (void)detect:(Mat*)frame classIds:(IntVector*)classIds confidences:(FloatVector*)confidences boxes:(NSMutableArray<Rect2i*>*)boxes confThreshold:(float)confThreshold NS_SWIFT_NAME(detect(frame:classIds:confidences:boxes:confThreshold:));
/**
 * Given the input @p frame, creates an input blob, runs the net and returns the resulting detections.
* @param classIds Class indexes in result detection.
* @param confidences A set of corresponding confidences.
* @param boxes A set of bounding boxes.
*/
- (void)detect:(Mat*)frame classIds:(IntVector*)classIds confidences:(FloatVector*)confidences boxes:(NSMutableArray<Rect2i*>*)boxes NS_SWIFT_NAME(detect(frame:classIds:confidences:boxes:));
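//
// Usage sketch (illustrative): running detection on a frame. The file names and
// `frame` are placeholders; IntVector and FloatVector mirror the DoubleVector
// wrapper shown later in this diff, and constructing them from an empty NSData is
// assumed to yield empty output containers for detect() to fill.
//
//   DetectionModel *model = [[DetectionModel alloc] initWithModel:@"yolo.weights" config:@"yolo.cfg"];
//   [model setNmsAcrossClasses:NO];                       // per-class NMS (the default)
//   IntVector *classIds = [[IntVector alloc] initWithData:[NSData data]];
//   FloatVector *confidences = [[FloatVector alloc] initWithData:[NSData data]];
//   NSMutableArray<Rect2i*> *boxes = [NSMutableArray array];
//   [model detect:frame classIds:classIds confidences:confidences boxes:boxes
//    confThreshold:0.5f nmsThreshold:0.4f];
//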
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,272 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/aruco_detector.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class DetectorParameters
/**
* struct DetectorParameters is used by ArucoDetector
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface DetectorParameters : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::DetectorParameters> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::DetectorParameters>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::DetectorParameters>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::DetectorParameters::DetectorParameters()
//
- (instancetype)init;
//
// bool cv::aruco::DetectorParameters::readDetectorParameters(FileNode fn)
//
// Unknown type 'FileNode' (I), skipping the function
//
// bool cv::aruco::DetectorParameters::writeDetectorParameters(FileStorage fs, String name = String())
//
// Unknown type 'FileStorage' (I), skipping the function
//
// C++: int cv::aruco::DetectorParameters::adaptiveThreshWinSizeMin
//
@property int adaptiveThreshWinSizeMin;
//
// C++: int cv::aruco::DetectorParameters::adaptiveThreshWinSizeMax
//
@property int adaptiveThreshWinSizeMax;
//
// C++: int cv::aruco::DetectorParameters::adaptiveThreshWinSizeStep
//
@property int adaptiveThreshWinSizeStep;
//
// C++: double cv::aruco::DetectorParameters::adaptiveThreshConstant
//
@property double adaptiveThreshConstant;
//
// C++: double cv::aruco::DetectorParameters::minMarkerPerimeterRate
//
@property double minMarkerPerimeterRate;
//
// C++: double cv::aruco::DetectorParameters::maxMarkerPerimeterRate
//
@property double maxMarkerPerimeterRate;
//
// C++: double cv::aruco::DetectorParameters::polygonalApproxAccuracyRate
//
@property double polygonalApproxAccuracyRate;
//
// C++: double cv::aruco::DetectorParameters::minCornerDistanceRate
//
@property double minCornerDistanceRate;
//
// C++: int cv::aruco::DetectorParameters::minDistanceToBorder
//
@property int minDistanceToBorder;
//
// C++: double cv::aruco::DetectorParameters::minMarkerDistanceRate
//
@property double minMarkerDistanceRate;
//
// C++: float cv::aruco::DetectorParameters::minGroupDistance
//
@property float minGroupDistance;
//
// C++: int cv::aruco::DetectorParameters::cornerRefinementMethod
//
@property int cornerRefinementMethod;
//
// C++: int cv::aruco::DetectorParameters::cornerRefinementWinSize
//
@property int cornerRefinementWinSize;
//
// C++: float cv::aruco::DetectorParameters::relativeCornerRefinmentWinSize
//
@property float relativeCornerRefinmentWinSize;
//
// C++: int cv::aruco::DetectorParameters::cornerRefinementMaxIterations
//
@property int cornerRefinementMaxIterations;
//
// C++: double cv::aruco::DetectorParameters::cornerRefinementMinAccuracy
//
@property double cornerRefinementMinAccuracy;
//
// C++: int cv::aruco::DetectorParameters::markerBorderBits
//
@property int markerBorderBits;
//
// C++: int cv::aruco::DetectorParameters::perspectiveRemovePixelPerCell
//
@property int perspectiveRemovePixelPerCell;
//
// C++: double cv::aruco::DetectorParameters::perspectiveRemoveIgnoredMarginPerCell
//
@property double perspectiveRemoveIgnoredMarginPerCell;
//
// C++: double cv::aruco::DetectorParameters::maxErroneousBitsInBorderRate
//
@property double maxErroneousBitsInBorderRate;
//
// C++: double cv::aruco::DetectorParameters::minOtsuStdDev
//
@property double minOtsuStdDev;
//
// C++: double cv::aruco::DetectorParameters::errorCorrectionRate
//
@property double errorCorrectionRate;
//
// C++: float cv::aruco::DetectorParameters::aprilTagQuadDecimate
//
@property float aprilTagQuadDecimate;
//
// C++: float cv::aruco::DetectorParameters::aprilTagQuadSigma
//
@property float aprilTagQuadSigma;
//
// C++: int cv::aruco::DetectorParameters::aprilTagMinClusterPixels
//
@property int aprilTagMinClusterPixels;
//
// C++: int cv::aruco::DetectorParameters::aprilTagMaxNmaxima
//
@property int aprilTagMaxNmaxima;
//
// C++: float cv::aruco::DetectorParameters::aprilTagCriticalRad
//
@property float aprilTagCriticalRad;
//
// C++: float cv::aruco::DetectorParameters::aprilTagMaxLineFitMse
//
@property float aprilTagMaxLineFitMse;
//
// C++: int cv::aruco::DetectorParameters::aprilTagMinWhiteBlackDiff
//
@property int aprilTagMinWhiteBlackDiff;
//
// C++: int cv::aruco::DetectorParameters::aprilTagDeglitch
//
@property int aprilTagDeglitch;
//
// C++: bool cv::aruco::DetectorParameters::detectInvertedMarker
//
@property BOOL detectInvertedMarker;
//
// C++: bool cv::aruco::DetectorParameters::useAruco3Detection
//
@property BOOL useAruco3Detection;
//
// C++: int cv::aruco::DetectorParameters::minSideLengthCanonicalImg
//
@property int minSideLengthCanonicalImg;
//
// C++: float cv::aruco::DetectorParameters::minMarkerLengthRatioOriginalImg
//
@property float minMarkerLengthRatioOriginalImg;
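//
// Usage sketch (illustrative): tuning a few thresholds before marker detection.
// The configured object would then be handed to an ArucoDetector, which is declared
// in a separate header and not shown here.
//
//   DetectorParameters *params = [[DetectorParameters alloc] init];
//   params.adaptiveThreshWinSizeMin = 5;        // smallest adaptive-threshold window
//   params.adaptiveThreshWinSizeMax = 25;       // largest adaptive-threshold window
//   params.minMarkerPerimeterRate = 0.02;       // reject very small marker candidates
//   params.detectInvertedMarker = YES;          // also look for white-on-black markers
//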
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,111 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/dnn.hpp"
#import "opencv2/dnn/dict.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class DictValue
/**
 * This struct stores a scalar value (or array) of one of the following types: double, cv::String or int64.
 * TODO: Maybe int64 is useless because the double type exactly stores at least 2^52 integers.
*
* Member of `Dnn`
*/
CV_EXPORTS @interface DictValue : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::dnn::DictValue> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::dnn::DictValue>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::dnn::DictValue>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::dnn::DictValue::DictValue(int i)
//
- (instancetype)initWithI:(int)i;
//
// cv::dnn::DictValue::DictValue(double p)
//
- (instancetype)initWithP:(double)p;
//
// cv::dnn::DictValue::DictValue(String s)
//
- (instancetype)initWithS:(NSString*)s;
//
// bool cv::dnn::DictValue::isInt()
//
- (BOOL)isInt NS_SWIFT_NAME(isInt());
//
// bool cv::dnn::DictValue::isString()
//
- (BOOL)isString NS_SWIFT_NAME(isString());
//
// bool cv::dnn::DictValue::isReal()
//
- (BOOL)isReal NS_SWIFT_NAME(isReal());
//
// int cv::dnn::DictValue::getIntValue(int idx = -1)
//
- (int)getIntValue:(int)idx NS_SWIFT_NAME(getIntValue(idx:));
- (int)getIntValue NS_SWIFT_NAME(getIntValue());
//
// double cv::dnn::DictValue::getRealValue(int idx = -1)
//
- (double)getRealValue:(int)idx NS_SWIFT_NAME(getRealValue(idx:));
- (double)getRealValue NS_SWIFT_NAME(getRealValue());
//
// String cv::dnn::DictValue::getStringValue(int idx = -1)
//
- (NSString*)getStringValue:(int)idx NS_SWIFT_NAME(getStringValue(idx:));
- (NSString*)getStringValue NS_SWIFT_NAME(getStringValue());
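//
// Usage sketch (illustrative): DictValue is a tagged scalar, so check its type
// before reading it.
//
//   DictValue *dv = [[DictValue alloc] initWithS:@"max"];
//   if ([dv isString]) {
//       NSString *s = [dv getStringValue];   // "max"
//   } else if ([dv isInt]) {
//       int i = [dv getIntValue];
//   }
//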
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,179 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/aruco_dictionary.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class Dictionary
/**
 * A Dictionary is a set of unique ArUco markers of the same size.
 *
 * `bytesList` is stored as a 2-dimensional Mat with 4 channels (the CV_8UC4 type is used) and contains the marker codewords, where:
 * - bytesList.rows is the dictionary size
 * - each marker is encoded using `nbytes = ceil(markerSize*markerSize/8.)` bytes
 * - each row contains all 4 rotations of the marker, so its length is `4*nbytes`
 * - the byte order in the bytesList[i] row:
 * `//bytes without rotation/bytes with rotation 1/bytes with rotation 2/bytes with rotation 3//`
 * So `bytesList.ptr(i)[k*nbytes + j]` is the j-th byte of the i-th marker, in its k-th rotation.
 * NOTE: Python bindings generate the bytesList matrix with shape `dictionary_size x nbytes x 4`,
 * but it should be indexed like the C++ version. Python example for the j-th byte of the i-th marker, in its k-th rotation:
 * `aruco_dict.bytesList[id].ravel()[k*nbytes + j]`
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface Dictionary : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::Dictionary> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::Dictionary>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::Dictionary>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::Dictionary::Dictionary()
//
- (instancetype)init;
//
// cv::aruco::Dictionary::Dictionary(Mat bytesList, int _markerSize, int maxcorr = 0)
//
/**
* Basic ArUco dictionary constructor
*
 * @param bytesList bits for all ArUco markers in the dictionary; see the memory layout in the class description
* @param _markerSize ArUco marker size in units
* @param maxcorr maximum number of bits that can be corrected
*/
- (instancetype)initWithBytesList:(Mat*)bytesList _markerSize:(int)_markerSize maxcorr:(int)maxcorr;
/**
* Basic ArUco dictionary constructor
*
 * @param bytesList bits for all ArUco markers in the dictionary; see the memory layout in the class description
* @param _markerSize ArUco marker size in units
*/
- (instancetype)initWithBytesList:(Mat*)bytesList _markerSize:(int)_markerSize;
//
// bool cv::aruco::Dictionary::readDictionary(FileNode fn)
//
// Unknown type 'FileNode' (I), skipping the function
//
// void cv::aruco::Dictionary::writeDictionary(FileStorage fs, String name = String())
//
// Unknown type 'FileStorage' (I), skipping the function
//
// bool cv::aruco::Dictionary::identify(Mat onlyBits, int& idx, int& rotation, double maxCorrectionRate)
//
/**
 * Given a matrix of bits, returns whether the marker is identified or not.
 *
 * Returns the marker id in the dictionary (if any) and its rotation via the idx and rotation output parameters.
*/
- (BOOL)identify:(Mat*)onlyBits idx:(int*)idx rotation:(int*)rotation maxCorrectionRate:(double)maxCorrectionRate NS_SWIFT_NAME(identify(onlyBits:idx:rotation:maxCorrectionRate:));
//
// int cv::aruco::Dictionary::getDistanceToId(Mat bits, int id, bool allRotations = true)
//
/**
* Returns Hamming distance of the input bits to the specific id.
*
 * If the `allRotations` flag is set, the four possible marker rotations are considered
*/
- (int)getDistanceToId:(Mat*)bits id:(int)id allRotations:(BOOL)allRotations NS_SWIFT_NAME(getDistanceToId(bits:id:allRotations:));
/**
* Returns Hamming distance of the input bits to the specific id.
*
 * If the `allRotations` flag is set, the four possible marker rotations are considered
*/
- (int)getDistanceToId:(Mat*)bits id:(int)id NS_SWIFT_NAME(getDistanceToId(bits:id:));
//
// void cv::aruco::Dictionary::generateImageMarker(int id, int sidePixels, Mat& _img, int borderBits = 1)
//
/**
* Generate a canonical marker image
*/
- (void)generateImageMarker:(int)id sidePixels:(int)sidePixels _img:(Mat*)_img borderBits:(int)borderBits NS_SWIFT_NAME(generateImageMarker(id:sidePixels:_img:borderBits:));
/**
* Generate a canonical marker image
*/
- (void)generateImageMarker:(int)id sidePixels:(int)sidePixels _img:(Mat*)_img NS_SWIFT_NAME(generateImageMarker(id:sidePixels:_img:));
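//
// Usage sketch (illustrative): rendering marker id 23 of a dictionary into a
// 200x200 image. Obtaining `dict` from a predefined set (e.g. through the Objdetect
// wrapper's getPredefinedDictionary) is assumed and lives in another header.
//
//   Mat *markerImage = [Mat new];
//   [dict generateImageMarker:23 sidePixels:200 _img:markerImage];   // default 1-bit border
//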
//
// static Mat cv::aruco::Dictionary::getByteListFromBits(Mat bits)
//
/**
* Transform matrix of bits to list of bytes with 4 marker rotations
*/
+ (Mat*)getByteListFromBits:(Mat*)bits NS_SWIFT_NAME(getByteListFromBits(bits:));
//
// static Mat cv::aruco::Dictionary::getBitsFromByteList(Mat byteList, int markerSize)
//
/**
* Transform list of bytes to matrix of bits
*/
+ (Mat*)getBitsFromByteList:(Mat*)byteList markerSize:(int)markerSize NS_SWIFT_NAME(getBitsFromByteList(byteList:markerSize:));
//
// C++: Mat cv::aruco::Dictionary::bytesList
//
@property Mat* bytesList;
//
// C++: int cv::aruco::Dictionary::markerSize
//
@property int markerSize;
//
// C++: int cv::aruco::Dictionary::maxCorrectionBits
//
@property int maxCorrectionBits;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,137 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/disparity_filter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
@class Rect2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class DisparityFilter
/**
* Main interface for all disparity map filters.
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface DisparityFilter : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::DisparityFilter> nativePtrDisparityFilter;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::DisparityFilter>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::DisparityFilter>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::DisparityFilter::filter(Mat disparity_map_left, Mat left_view, Mat& filtered_disparity_map, Mat disparity_map_right = Mat(), Rect ROI = Rect(), Mat right_view = Mat())
//
/**
* Apply filtering to the disparity map.
*
* @param disparity_map_left disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity
* values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map
* can have any resolution, it will be automatically resized to fit left_view resolution.
*
* @param left_view left view of the original stereo-pair to guide the filtering process, 8-bit single-channel
* or three-channel image.
*
* @param filtered_disparity_map output disparity map.
*
* @param disparity_map_right optional argument, some implementations might also use the disparity map
* of the right view to compute confidence maps, for instance.
*
* @param ROI region of the disparity map to filter. Optional, usually it should be set automatically.
*
* @param right_view optional argument, some implementations might also use the right view of the original
* stereo-pair.
*/
- (void)filter:(Mat*)disparity_map_left left_view:(Mat*)left_view filtered_disparity_map:(Mat*)filtered_disparity_map disparity_map_right:(Mat*)disparity_map_right ROI:(Rect2i*)ROI right_view:(Mat*)right_view NS_SWIFT_NAME(filter(disparity_map_left:left_view:filtered_disparity_map:disparity_map_right:ROI:right_view:));
/**
* Apply filtering to the disparity map.
*
* @param disparity_map_left disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity
* values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map
* can have any resolution, it will be automatically resized to fit left_view resolution.
*
* @param left_view left view of the original stereo-pair to guide the filtering process, 8-bit single-channel
* or three-channel image.
*
* @param filtered_disparity_map output disparity map.
*
* @param disparity_map_right optional argument, some implementations might also use the disparity map
* of the right view to compute confidence maps, for instance.
*
* @param ROI region of the disparity map to filter. Optional, usually it should be set automatically.
*
*/
- (void)filter:(Mat*)disparity_map_left left_view:(Mat*)left_view filtered_disparity_map:(Mat*)filtered_disparity_map disparity_map_right:(Mat*)disparity_map_right ROI:(Rect2i*)ROI NS_SWIFT_NAME(filter(disparity_map_left:left_view:filtered_disparity_map:disparity_map_right:ROI:));
/**
* Apply filtering to the disparity map.
*
* @param disparity_map_left disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity
* values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map
* can have any resolution, it will be automatically resized to fit left_view resolution.
*
* @param left_view left view of the original stereo-pair to guide the filtering process, 8-bit single-channel
* or three-channel image.
*
* @param filtered_disparity_map output disparity map.
*
* @param disparity_map_right optional argument, some implementations might also use the disparity map
* of the right view to compute confidence maps, for instance.
*
*/
- (void)filter:(Mat*)disparity_map_left left_view:(Mat*)left_view filtered_disparity_map:(Mat*)filtered_disparity_map disparity_map_right:(Mat*)disparity_map_right NS_SWIFT_NAME(filter(disparity_map_left:left_view:filtered_disparity_map:disparity_map_right:));
/**
* Apply filtering to the disparity map.
*
* @param disparity_map_left disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity
* values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map
* can have any resolution, it will be automatically resized to fit left_view resolution.
*
* @param left_view left view of the original stereo-pair to guide the filtering process, 8-bit single-channel
* or three-channel image.
*
* @param filtered_disparity_map output disparity map.
*
*/
- (void)filter:(Mat*)disparity_map_left left_view:(Mat*)left_view filtered_disparity_map:(Mat*)filtered_disparity_map NS_SWIFT_NAME(filter(disparity_map_left:left_view:filtered_disparity_map:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,150 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/disparity_filter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "DisparityFilter.h"
@class Mat;
@class Rect2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class DisparityWLSFilter
/**
 * Disparity map filter based on the Weighted Least Squares filter (in the form of a Fast Global Smoother that
 * is a lot faster than traditional Weighted Least Squares filter implementations) and optional use of
 * left-right-consistency-based confidence to refine the results in half-occlusions and uniform areas.
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface DisparityWLSFilter : DisparityFilter
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::DisparityWLSFilter> nativePtrDisparityWLSFilter;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::DisparityWLSFilter>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::DisparityWLSFilter>)nativePtr;
#endif
#pragma mark - Methods
//
// double cv::ximgproc::DisparityWLSFilter::getLambda()
//
/**
* Lambda is a parameter defining the amount of regularization during filtering. Larger values force
* filtered disparity map edges to adhere more to source image edges. Typical value is 8000.
*/
- (double)getLambda NS_SWIFT_NAME(getLambda());
//
// void cv::ximgproc::DisparityWLSFilter::setLambda(double _lambda)
//
/**
* @see `-getLambda:`
*/
- (void)setLambda:(double)_lambda NS_SWIFT_NAME(setLambda(_lambda:));
//
// double cv::ximgproc::DisparityWLSFilter::getSigmaColor()
//
/**
* SigmaColor is a parameter defining how sensitive the filtering process is to source image edges.
* Large values can lead to disparity leakage through low-contrast edges. Small values can make the filter too
* sensitive to noise and textures in the source image. Typical values range from 0.8 to 2.0.
*/
- (double)getSigmaColor NS_SWIFT_NAME(getSigmaColor());
//
// void cv::ximgproc::DisparityWLSFilter::setSigmaColor(double _sigma_color)
//
/**
* @see `-getSigmaColor:`
*/
- (void)setSigmaColor:(double)_sigma_color NS_SWIFT_NAME(setSigmaColor(_sigma_color:));
//
// int cv::ximgproc::DisparityWLSFilter::getLRCthresh()
//
/**
* LRCthresh is a threshold of disparity difference used in left-right-consistency check during
* confidence map computation. The default value of 24 (1.5 pixels) is virtually always good enough.
*/
- (int)getLRCthresh NS_SWIFT_NAME(getLRCthresh());
//
// void cv::ximgproc::DisparityWLSFilter::setLRCthresh(int _LRC_thresh)
//
/**
* @see `-getLRCthresh:`
*/
- (void)setLRCthresh:(int)_LRC_thresh NS_SWIFT_NAME(setLRCthresh(_LRC_thresh:));
//
// int cv::ximgproc::DisparityWLSFilter::getDepthDiscontinuityRadius()
//
/**
* DepthDiscontinuityRadius is a parameter used in confidence computation. It defines the size of
* low-confidence regions around depth discontinuities.
*/
- (int)getDepthDiscontinuityRadius NS_SWIFT_NAME(getDepthDiscontinuityRadius());
//
// void cv::ximgproc::DisparityWLSFilter::setDepthDiscontinuityRadius(int _disc_radius)
//
/**
* @see `-getDepthDiscontinuityRadius:`
*/
- (void)setDepthDiscontinuityRadius:(int)_disc_radius NS_SWIFT_NAME(setDepthDiscontinuityRadius(_disc_radius:));
//
// Mat cv::ximgproc::DisparityWLSFilter::getConfidenceMap()
//
/**
* Get the confidence map that was used in the last filter call. It is a CV_32F one-channel image
* with values ranging from 0.0 (totally untrusted regions of the raw disparity map) to 255.0 (regions containing
* correct disparity values with a high degree of confidence).
*/
- (Mat*)getConfidenceMap NS_SWIFT_NAME(getConfidenceMap());
//
// Rect cv::ximgproc::DisparityWLSFilter::getROI()
//
/**
* Get the ROI used in the last filter call
*/
- (Rect2i*)getROI NS_SWIFT_NAME(getROI());
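//
// Usage sketch (illustrative): typical parameter choices before filtering.
// `wls` is assumed to come from the Ximgproc factory (createDisparityWLSFilter),
// and `leftDisp`, `rightDisp`, `leftView` from a stereo matcher run elsewhere.
//
//   [wls setLambda:8000.0];        // stronger edge-aware regularization
//   [wls setSigmaColor:1.5];       // moderate sensitivity to source-image edges
//   Mat *filtered = [Mat new];
//   [wls filter:leftDisp left_view:leftView filtered_disparity_map:filtered
//    disparity_map_right:rightDisp];
//   Mat *confidence = [wls getConfidenceMap];   // CV_32F, 0.0 .. 255.0 per pixel
//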
@end
NS_ASSUME_NONNULL_END

File diff suppressed because it is too large

View File

@ -0,0 +1,90 @@
//
// Double2.h
//
// Created by Giles Payne on 2020/05/22.
//
#pragma once
#ifdef __cplusplus
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Mat;
NS_ASSUME_NONNULL_BEGIN
/**
* Simple wrapper for a vector of two `double`
*/
CV_EXPORTS @interface Double2 : NSObject
#pragma mark - Properties
/**
* First vector element
*/
@property double v0;
/**
* Second vector element
*/
@property double v1;
#ifdef __cplusplus
/**
* The wrapped vector
*/
@property(readonly) cv::Vec2d& nativeRef;
#endif
#pragma mark - Constructors
/**
 * Create a zero-initialized vector
*/
-(instancetype)init;
/**
* Create vector with specified element values
* @param v0 First element
* @param v1 Second element
*/
-(instancetype)initWithV0:(double)v0 v1:(double)v1;
/**
* Create vector with specified element values
* @param vals array of element values
*/
-(instancetype)initWithVals:(NSArray<NSNumber*>*)vals;
#ifdef __cplusplus
+(instancetype)fromNative:(cv::Vec2d&)vec2d;
#endif
/**
* Update vector with specified element values
* @param vals array of element values
*/
-(void)set:(NSArray<NSNumber*>*)vals NS_SWIFT_NAME(set(vals:));
/**
* Get vector as an array
*/
-(NSArray<NSNumber*>*)get;
#pragma mark - Common Methods
/**
* Compare for equality
* @param other Object to compare
*/
-(BOOL)isEqual:(nullable id)other;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,96 @@
//
// Double3.h
//
// Created by Giles Payne on 2020/05/22.
//
#pragma once
#ifdef __cplusplus
#import "opencv2/core.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Mat;
NS_ASSUME_NONNULL_BEGIN
/**
* Simple wrapper for a vector of three `double`
*/
CV_EXPORTS @interface Double3 : NSObject
#pragma mark - Properties
/**
* First vector element
*/
@property double v0;
/**
* Second vector element
*/
@property double v1;
/**
* Third vector element
*/
@property double v2;
#ifdef __cplusplus
/**
* The wrapped vector
*/
@property(readonly) cv::Vec3d& nativeRef;
#endif
#pragma mark - Constructors
/**
 * Create a zero-initialized vector
*/
-(instancetype)init;
/**
* Create vector with specified element values
* @param v0 First element
* @param v1 Second element
* @param v2 Third element
*/
-(instancetype)initWithV0:(double)v0 v1:(double)v1 v2:(double)v2;
/**
* Create vector with specified element values
* @param vals array of element values
*/
-(instancetype)initWithVals:(NSArray<NSNumber*>*)vals;
#ifdef __cplusplus
+(instancetype)fromNative:(cv::Vec3d&)vec3d;
#endif
/**
* Update vector with specified element values
* @param vals array of element values
*/
-(void)set:(NSArray<NSNumber*>*)vals NS_SWIFT_NAME(set(vals:));
/**
* Get vector as an array
*/
-(NSArray<NSNumber*>*)get;
#pragma mark - Common Methods
/**
* Compare for equality
* @param other Object to compare
*/
-(BOOL)isEqual:(nullable id)other;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,89 @@
//
// DoubleVector.h
//
// Created by Giles Payne on 2020/01/04.
//
#pragma once
#import <Foundation/Foundation.h>
#ifdef __cplusplus
#import <vector>
#endif
#import "CVObjcUtil.h"
NS_ASSUME_NONNULL_BEGIN
/**
* Utility class to wrap a `std::vector<double>`
*/
CV_EXPORTS @interface DoubleVector : NSObject
#pragma mark - Constructors
/**
* Create DoubleVector and initialize with the contents of an NSData object
* @param data NSData containing raw double array
*/
-(instancetype)initWithData:(NSData*)data;
/**
* Create DoubleVector and initialize with the contents of another DoubleVector object
* @param src DoubleVector containing data to copy
*/
-(instancetype)initWithVector:(DoubleVector*)src;
#ifdef __OBJC__
/**
* Create DoubleVector from raw C array
* @param array The raw C array
 * @param elements The number of elements in the array
*/
-(instancetype)initWithNativeArray:(double*)array elements:(int)elements;
#endif
#ifdef __cplusplus
/**
* Create DoubleVector from std::vector<double>
* @param src The std::vector<double> object to wrap
*/
-(instancetype)initWithStdVector:(std::vector<double>&)src;
+(instancetype)fromNative:(std::vector<double>&)src;
#endif
#pragma mark - Properties
/**
* Length of the vector
*/
@property(readonly) size_t length;
#ifdef __OBJC__
/**
* Raw C array
*/
@property(readonly) double* nativeArray;
#endif
#ifdef __cplusplus
/**
* The wrapped std::vector<double> object
*/
@property(readonly) std::vector<double>& nativeRef;
#endif
/**
* NSData object containing the raw double data
*/
@property(readonly) NSData* data;
#pragma mark - Accessor method
/**
* Return array element
* @param index Index of the array element to return
*/
-(double)get:(NSInteger)index;
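//
// Usage sketch (illustrative): wrapping a small C array and reading it back.
//
//   double raw[] = { 0.25, 0.5, 0.75 };
//   DoubleVector *vec = [[DoubleVector alloc] initWithNativeArray:raw elements:3];
//   double mid = [vec get:1];          // 0.5
//   size_t n = vec.length;             // 3
//   NSData *bytes = vec.data;          // the raw bytes of the three doubles
//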
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,621 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ml.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "StatModel.h"
@class Double2;
@class Mat;
@class TermCriteria;
// C++: enum EMTypes (cv.ml.EM.Types)
typedef NS_ENUM(int, EMTypes) {
EM_COV_MAT_SPHERICAL NS_SWIFT_NAME(COV_MAT_SPHERICAL) = 0,
EM_COV_MAT_DIAGONAL NS_SWIFT_NAME(COV_MAT_DIAGONAL) = 1,
EM_COV_MAT_GENERIC NS_SWIFT_NAME(COV_MAT_GENERIC) = 2,
EM_COV_MAT_DEFAULT NS_SWIFT_NAME(COV_MAT_DEFAULT) = EM_COV_MAT_DIAGONAL
};
NS_ASSUME_NONNULL_BEGIN
// C++: class EM
/**
* The class implements the Expectation Maximization algorithm.
*
* @see REF: ml_intro_em
*
* Member of `Ml`
*/
CV_EXPORTS @interface EM : StatModel
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ml::EM> nativePtrEM;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ml::EM>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ml::EM>)nativePtr;
#endif
#pragma mark - Class Constants
@property (class, readonly) int DEFAULT_NCLUSTERS NS_SWIFT_NAME(DEFAULT_NCLUSTERS);
@property (class, readonly) int DEFAULT_MAX_ITERS NS_SWIFT_NAME(DEFAULT_MAX_ITERS);
@property (class, readonly) int START_E_STEP NS_SWIFT_NAME(START_E_STEP);
@property (class, readonly) int START_M_STEP NS_SWIFT_NAME(START_M_STEP);
@property (class, readonly) int START_AUTO_STEP NS_SWIFT_NAME(START_AUTO_STEP);
#pragma mark - Methods
//
// int cv::ml::EM::getClustersNumber()
//
/**
* @see `-setClustersNumber:`
*/
- (int)getClustersNumber NS_SWIFT_NAME(getClustersNumber());
//
// void cv::ml::EM::setClustersNumber(int val)
//
/**
* getClustersNumber @see `-getClustersNumber:`
*/
- (void)setClustersNumber:(int)val NS_SWIFT_NAME(setClustersNumber(val:));
//
// int cv::ml::EM::getCovarianceMatrixType()
//
/**
* @see `-setCovarianceMatrixType:`
*/
- (int)getCovarianceMatrixType NS_SWIFT_NAME(getCovarianceMatrixType());
//
// void cv::ml::EM::setCovarianceMatrixType(int val)
//
/**
* getCovarianceMatrixType @see `-getCovarianceMatrixType:`
*/
- (void)setCovarianceMatrixType:(int)val NS_SWIFT_NAME(setCovarianceMatrixType(val:));
//
// TermCriteria cv::ml::EM::getTermCriteria()
//
/**
* @see `-setTermCriteria:`
*/
- (TermCriteria*)getTermCriteria NS_SWIFT_NAME(getTermCriteria());
//
// void cv::ml::EM::setTermCriteria(TermCriteria val)
//
/**
* getTermCriteria @see `-getTermCriteria:`
*/
- (void)setTermCriteria:(TermCriteria*)val NS_SWIFT_NAME(setTermCriteria(val:));
//
// Mat cv::ml::EM::getWeights()
//
/**
* Returns weights of the mixtures
*
* Returns vector with the number of elements equal to the number of mixtures.
*/
- (Mat*)getWeights NS_SWIFT_NAME(getWeights());
//
// Mat cv::ml::EM::getMeans()
//
/**
* Returns the cluster centers (means of the Gaussian mixture)
*
* Returns matrix with the number of rows equal to the number of mixtures and number of columns
* equal to the space dimensionality.
*/
- (Mat*)getMeans NS_SWIFT_NAME(getMeans());
//
// void cv::ml::EM::getCovs(vector_Mat& covs)
//
/**
 * Returns covariance matrices
 *
 * Returns a vector of covariance matrices. The number of matrices is the number of Gaussian mixtures;
 * each matrix is a square floating-point matrix NxN, where N is the space dimensionality.
*/
- (void)getCovs:(NSMutableArray<Mat*>*)covs NS_SWIFT_NAME(getCovs(covs:));
//
// float cv::ml::EM::predict(Mat samples, Mat& results = Mat(), int flags = 0)
//
/**
* Returns posterior probabilities for the provided samples
*
* @param samples The input samples, floating-point matrix
* @param results The optional output `$$ nSamples \times nClusters$$` matrix of results. It contains
* posterior probabilities for each sample from the input
* @param flags This parameter will be ignored
*/
- (float)predict:(Mat*)samples results:(Mat*)results flags:(int)flags NS_SWIFT_NAME(predict(samples:results:flags:));
/**
* Returns posterior probabilities for the provided samples
*
* @param samples The input samples, floating-point matrix
* @param results The optional output `$$ nSamples \times nClusters$$` matrix of results. It contains
* posterior probabilities for each sample from the input
*/
- (float)predict:(Mat*)samples results:(Mat*)results NS_SWIFT_NAME(predict(samples:results:));
/**
* Returns posterior probabilities for the provided samples
*
* @param samples The input samples, floating-point matrix
*/
- (float)predict:(Mat*)samples NS_SWIFT_NAME(predict(samples:));
//
// Vec2d cv::ml::EM::predict2(Mat sample, Mat& probs)
//
/**
* Returns a likelihood logarithm value and an index of the most probable mixture component
* for the given sample.
*
* @param sample A sample for classification. It should be a one-channel matrix of
* `$$1 \times dims$$` or `$$dims \times 1$$` size.
* @param probs Optional output matrix that contains posterior probabilities of each component
* given the sample. It has `$$1 \times nclusters$$` size and CV_64FC1 type.
*
 * The method returns a two-element double vector. The zeroth element is a likelihood logarithm value for
 * the sample. The first element is the index of the most probable mixture component for the given
 * sample.
*/
- (Double2*)predict2:(Mat*)sample probs:(Mat*)probs NS_SWIFT_NAME(predict2(sample:probs:));
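//
// Usage sketch (illustrative): classifying one sample with an already trained
// model `em` (training is shown in the trainEM sketch below); `sample` is a
// 1 x dims floating-point Mat prepared elsewhere.
//
//   Mat *probs = [Mat new];
//   Double2 *res = [em predict2:sample probs:probs];
//   double logLikelihood = res.v0;        // likelihood logarithm for the sample
//   int component = (int)res.v1;          // index of the most probable mixture component
//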
//
// bool cv::ml::EM::trainEM(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
//
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. Initial values of the model parameters will be
* estimated by the k-means algorithm.
*
 * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
* responses (class labels or function values) as input. Instead, it computes the *Maximum
* Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
* parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means , `$$S_k$$` in
* covs[k], `$$\pi_k$$` in weights , and optionally computes the output "class label" for each
* sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
* probable mixture component for each sample).
*
* The trained model can be used further for prediction, just like any other classifier. The
* trained model is similar to the NormalBayesClassifier.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
* @param labels The optional output "class label" for each sample:
* `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
* mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
* @param probs The optional output matrix that contains posterior probabilities of each Gaussian
 * mixture component given each sample. It has `$$nsamples \times nclusters$$` size and
* CV_64FC1 type.
*/
- (BOOL)trainEM:(Mat*)samples logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels probs:(Mat*)probs NS_SWIFT_NAME(trainEM(samples:logLikelihoods:labels:probs:));
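//
// Usage sketch (illustrative): fitting a 3-component mixture to `samples`
// (a one-channel Mat with one row per sample, prepared elsewhere). The
// no-argument `create` factory is assumed to be the one generated further down
// in this header from cv::ml::EM::create().
//
//   EM *em = [EM create];
//   [em setClustersNumber:3];
//   Mat *labels = [Mat new];
//   BOOL converged = [em trainEM:samples logLikelihoods:[Mat new] labels:labels];
//   Mat *means = [em getMeans];           // nclusters x dims cluster centers
//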
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. Initial values of the model parameters will be
* estimated by the k-means algorithm.
*
 * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
* responses (class labels or function values) as input. Instead, it computes the *Maximum
* Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
* parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means , `$$S_k$$` in
* covs[k], `$$\pi_k$$` in weights , and optionally computes the output "class label" for each
* sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
* probable mixture component for each sample).
*
* The trained model can be used further for prediction, just like any other classifier. The
* trained model is similar to the NormalBayesClassifier.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
* @param labels The optional output "class label" for each sample:
* `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
* mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
*/
- (BOOL)trainEM:(Mat*)samples logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels NS_SWIFT_NAME(trainEM(samples:logLikelihoods:labels:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. Initial values of the model parameters will be
* estimated by the k-means algorithm.
*
 * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
* responses (class labels or function values) as input. Instead, it computes the *Maximum
* Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
* parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means , `$$S_k$$` in
* covs[k], `$$\pi_k$$` in weights , and optionally computes the output "class label" for each
* sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
* probable mixture component for each sample).
*
* The trained model can be used further for prediction, just like any other classifier. The
* trained model is similar to the NormalBayesClassifier.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
*/
- (BOOL)trainEM:(Mat*)samples logLikelihoods:(Mat*)logLikelihoods NS_SWIFT_NAME(trainEM(samples:logLikelihoods:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. Initial values of the model parameters will be
* estimated by the k-means algorithm.
*
 * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
* responses (class labels or function values) as input. Instead, it computes the *Maximum
* Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
* parameters inside the structure: `$$p_{i,k}$$` in probs, `$$a_k$$` in means , `$$S_k$$` in
* covs[k], `$$\pi_k$$` in weights , and optionally computes the output "class label" for each
* sample: `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most
* probable mixture component for each sample).
*
* The trained model can be used further for prediction, just like any other classifier. The
* trained model is similar to the NormalBayesClassifier.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
*/
- (BOOL)trainEM:(Mat*)samples NS_SWIFT_NAME(trainEM(samples:));
//
// bool cv::ml::EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
//
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
* mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
* `$$S_k$$` of mixture components.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
* `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
* converted to the inner matrix of such type for the further computing.
* @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
* covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
* do not have CV_64F type they will be converted to the inner matrices of such type for the
* further computing.
* @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
* floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
* @param labels The optional output "class label" for each sample:
* `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
* mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
* @param probs The optional output matrix that contains posterior probabilities of each Gaussian
 * mixture component given each sample. It has `$$nsamples \times nclusters$$` size and
* CV_64FC1 type.
*/
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels probs:(Mat*)probs NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:logLikelihoods:labels:probs:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
* mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
* `$$S_k$$` of mixture components.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
* `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
* converted to the inner matrix of such type for the further computing.
* @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
* covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
* do not have CV_64F type they will be converted to the inner matrices of such type for the
* further computing.
* @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
* floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
* @param labels The optional output "class label" for each sample:
* `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
* mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
*/
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:logLikelihoods:labels:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
* mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
* `$$S_k$$` of mixture components.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
* `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
* converted to the inner matrix of such type for the further computing.
* @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
* covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
* do not have CV_64F type they will be converted to the inner matrices of such type for the
* further computing.
* @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
* floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
*/
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 logLikelihoods:(Mat*)logLikelihoods NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:logLikelihoods:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
* mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
* `$$S_k$$` of mixture components.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
* `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
* converted to the inner matrix of such type for the further computing.
* @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
* covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
* do not have CV_64F type they will be converted to the inner matrices of such type for the
* further computing.
* @param weights0 Initial weights `$$\pi_k$$` of mixture components. It should be a one-channel
* floating-point matrix with `$$1 \times nclusters$$` or `$$nclusters \times 1$$` size.
*/
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 weights0:(Mat*)weights0 NS_SWIFT_NAME(trainE(samples:means0:covs0:weights0:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
* mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
* `$$S_k$$` of mixture components.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
* `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
* converted to the inner matrix of such type for the further computing.
* @param covs0 The vector of initial covariance matrices `$$S_k$$` of mixture components. Each of
* covariance matrices is a one-channel matrix of `$$dims \times dims$$` size. If the matrices
* do not have CV_64F type they will be converted to the inner matrices of such type for the
* further computing.
*/
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 covs0:(Mat*)covs0 NS_SWIFT_NAME(trainE(samples:means0:covs0:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Expectation step. You need to provide initial means `$$a_k$$` of
* mixture components. Optionally you can pass initial weights `$$\pi_k$$` and covariance matrices
* `$$S_k$$` of mixture components.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param means0 Initial means `$$a_k$$` of mixture components. It is a one-channel matrix of
* `$$nclusters \times dims$$` size. If the matrix does not have CV_64F type it will be
* converted to the inner matrix of such type for the further computing.
*/
- (BOOL)trainE:(Mat*)samples means0:(Mat*)means0 NS_SWIFT_NAME(trainE(samples:means0:));
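//
// A minimal trainE usage sketch (illustrative and hedged): it assumes `samples` is an
// nsamples x dims CV_64F Mat and `means0` an nclusters x dims CV_64F Mat prepared
// elsewhere, and it keeps the default number of mixture components.
//
//     EM *em = [EM create];
//     BOOL trained = [em trainE:samples means0:means0];
//     // Cluster assignments can then be obtained with the predict methods
//     // inherited from StatModel (declared in another header).
//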
//
// bool cv::ml::EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
//
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Maximization step. You need to provide initial probabilities
* `$$p_{i,k}$$` to use this option.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param probs0 Initial probabilities `$$p_{i,k}$$` that sample i belongs to mixture component k. It is a
* one-channel floating-point matrix of `$$nsamples \times nclusters$$` size.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
* @param labels The optional output "class label" for each sample:
* `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
* mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
* @param probs The optional output matrix that contains posterior probabilities of each Gaussian
* mixture component given each sample. It has `$$nsamples \times nclusters$$` size and
* CV_64FC1 type.
*/
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels probs:(Mat*)probs NS_SWIFT_NAME(trainM(samples:probs0:logLikelihoods:labels:probs:));
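//
// A minimal trainM usage sketch (illustrative and hedged): `samples` is assumed to be an
// nsamples x dims CV_64F Mat and `probs0` an nsamples x nclusters CV_64F Mat of initial
// membership probabilities, both prepared elsewhere.
//
//     EM *em = [EM create];
//     Mat *logLikelihoods = [[Mat alloc] init];
//     Mat *labels = [[Mat alloc] init];
//     Mat *probs = [[Mat alloc] init];
//     BOOL trained = [em trainM:samples probs0:probs0 logLikelihoods:logLikelihoods
//                        labels:labels probs:probs];
//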
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Maximization step. You need to provide initial probabilities
* `$$p_{i,k}$$` to use this option.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param probs0 Initial probabilities `$$p_{i,k}$$` that sample i belongs to mixture component k. It is a
* one-channel floating-point matrix of `$$nsamples \times nclusters$$` size.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
* @param labels The optional output "class label" for each sample:
* `$$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N$$` (indices of the most probable
* mixture component for each sample). It has `$$nsamples \times 1$$` size and CV_32SC1 type.
*/
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 logLikelihoods:(Mat*)logLikelihoods labels:(Mat*)labels NS_SWIFT_NAME(trainM(samples:probs0:logLikelihoods:labels:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Maximization step. You need to provide initial probabilities
* `$$p_{i,k}$$` to use this option.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param probs0 Initial probabilities `$$p_{i,k}$$` that sample i belongs to mixture component k. It is a
* one-channel floating-point matrix of `$$nsamples \times nclusters$$` size.
* @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
* each sample. It has `$$nsamples \times 1$$` size and CV_64FC1 type.
*/
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 logLikelihoods:(Mat*)logLikelihoods NS_SWIFT_NAME(trainM(samples:probs0:logLikelihoods:));
/**
* Estimate the Gaussian mixture parameters from a samples set.
*
* This variation starts with Maximization step. You need to provide initial probabilities
* `$$p_{i,k}$$` to use this option.
*
* @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
* one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
* it will be converted to the inner matrix of such type for the further computing.
* @param probs0 Initial probabilities `$$p_{i,k}$$` that sample i belongs to mixture component k. It is a
* one-channel floating-point matrix of `$$nsamples \times nclusters$$` size.
*/
- (BOOL)trainM:(Mat*)samples probs0:(Mat*)probs0 NS_SWIFT_NAME(trainM(samples:probs0:));
//
// static Ptr_EM cv::ml::EM::create()
//
/**
* Creates empty %EM model.
* The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
* can use one of the EM::train\* methods or load it from a file using Algorithm::load\<EM\>(filename).
*/
+ (EM*)create NS_SWIFT_NAME(create());
//
// static Ptr_EM cv::ml::EM::load(String filepath, String nodeName = String())
//
/**
* Loads and creates a serialized EM from a file
*
* Use EM::save to serialize and store an EM to disk.
* Load the EM from this file again by calling this function with the path to the file.
* Optionally specify the node in the file containing the classifier.
*
* @param filepath path to serialized EM
* @param nodeName name of node containing the classifier
*/
+ (EM*)load:(NSString*)filepath nodeName:(NSString*)nodeName NS_SWIFT_NAME(load(filepath:nodeName:));
/**
* Loads and creates a serialized EM from a file
*
* Use EM::save to serialize and store an EM to disk.
* Load the EM from this file again by calling this function with the path to the file.
* Optionally specify the node in the file containing the classifier.
*
* @param filepath path to serialized EM
*/
+ (EM*)load:(NSString*)filepath NS_SWIFT_NAME(load(filepath:));
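//
// A short persistence sketch (hedged): the file name is illustrative, and saving relies
// on the save method inherited from Algorithm (declared in another header).
//
//     // [em save:@"em_model.yml"];                // assumed inherited API
//     EM *restored = [EM load:@"em_model.yml"];    // restore the serialized model
//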
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,52 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/text.hpp"
#import "opencv2/text/erfilter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class ERFilter
/**
* Base class for the 1st and 2nd stages of the Neumann and Matas scene text detection algorithm CITE: Neumann12.
*
* Extracts the component tree (if needed) and filters the extremal regions (ERs) by using a given classifier.
*
* Member of `Text`
*/
CV_EXPORTS @interface ERFilter : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::text::ERFilter> nativePtrERFilter;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::text::ERFilter>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::text::ERFilter>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,53 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/text.hpp"
#import "opencv2/text/erfilter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class Callback
/**
* The classifier callback is wrapped in a class.
*
* Doing so hides the concrete classifier (SVM, Boost, etc.); developers can provide their own
* classifiers to the ERFilter algorithm.
*
* Member of `Text`
*/
CV_EXPORTS @interface ERFilterCallback : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::text::ERFilter::Callback> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::text::ERFilter::Callback>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::text::ERFilter::Callback>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,180 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/sparse_match_interpolator.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "SparseMatchInterpolator.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class EdgeAwareInterpolator
/**
* Sparse match interpolation algorithm based on a modified locally-weighted affine
* estimator from CITE: Revaud2015, with the Fast Global Smoother as a post-processing filter.
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface EdgeAwareInterpolator : SparseMatchInterpolator
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::EdgeAwareInterpolator> nativePtrEdgeAwareInterpolator;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::EdgeAwareInterpolator>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::EdgeAwareInterpolator>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::EdgeAwareInterpolator::setCostMap(Mat _costMap)
//
/**
* Interface to provide a more elaborated cost map, i.e. edge map, for the edge-aware term.
* This implementation is based on a rather simple gradient-based edge map estimation.
* To use a more complex edge map estimator (e.g. StructuredEdgeDetection, which was
* used in the original publication) that may lead to improved accuracy, the internal
* edge map estimation can be bypassed here.
* @param _costMap a type CV_32FC1 Mat is required.
* @see `cv::ximgproc::createSuperpixelSLIC`
*/
- (void)setCostMap:(Mat*)_costMap NS_SWIFT_NAME(setCostMap(_costMap:));
//
// void cv::ximgproc::EdgeAwareInterpolator::setK(int _k)
//
/**
* K is the number of nearest-neighbor matches considered when fitting a locally affine
* model. Usually it should be around 128. Lower values make the interpolation
* noticeably faster.
*/
- (void)setK:(int)_k NS_SWIFT_NAME(setK(_k:));
//
// int cv::ximgproc::EdgeAwareInterpolator::getK()
//
/**
* @see `-setK:`
*/
- (int)getK NS_SWIFT_NAME(getK());
//
// void cv::ximgproc::EdgeAwareInterpolator::setSigma(float _sigma)
//
/**
* Sigma is a parameter defining how fast the weights decrease in the locally-weighted affine
* fitting. Higher values can help preserve fine details; lower values can help to reduce noise in the
* output flow.
*/
- (void)setSigma:(float)_sigma NS_SWIFT_NAME(setSigma(_sigma:));
//
// float cv::ximgproc::EdgeAwareInterpolator::getSigma()
//
/**
* @see `-setSigma:`
*/
- (float)getSigma NS_SWIFT_NAME(getSigma());
//
// void cv::ximgproc::EdgeAwareInterpolator::setLambda(float _lambda)
//
/**
* Lambda is a parameter defining the weight of the edge-aware term in the geodesic distance;
* it should be in the range of 0 to 1000.
*/
- (void)setLambda:(float)_lambda NS_SWIFT_NAME(setLambda(_lambda:));
//
// float cv::ximgproc::EdgeAwareInterpolator::getLambda()
//
/**
* @see `-setLambda:`
*/
- (float)getLambda NS_SWIFT_NAME(getLambda());
//
// void cv::ximgproc::EdgeAwareInterpolator::setUsePostProcessing(bool _use_post_proc)
//
/**
* Sets whether the fastGlobalSmootherFilter() post-processing is employed. It is turned on by
* default.
*/
- (void)setUsePostProcessing:(BOOL)_use_post_proc NS_SWIFT_NAME(setUsePostProcessing(_use_post_proc:));
//
// bool cv::ximgproc::EdgeAwareInterpolator::getUsePostProcessing()
//
/**
* @see `-setUsePostProcessing:`
*/
- (BOOL)getUsePostProcessing NS_SWIFT_NAME(getUsePostProcessing());
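//
// A typical parameter setup sketch (hedged): `eai` is assumed to be an EdgeAwareInterpolator
// obtained from the ximgproc factory function (declared in another header), and `costMap`
// an optional CV_32FC1 Mat computed elsewhere.
//
//     [eai setK:128];                  // nearest-neighbor matches per affine fit
//     [eai setSigma:0.05f];            // weight falloff of the affine fitting
//     [eai setLambda:999.0f];          // weight of the edge-aware term
//     [eai setUsePostProcessing:YES];  // enable fastGlobalSmootherFilter()
//     [eai setCostMap:costMap];        // optionally bypass the internal edge map
//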
//
// void cv::ximgproc::EdgeAwareInterpolator::setFGSLambda(float _lambda)
//
/**
* Sets the respective fastGlobalSmootherFilter() parameter.
*/
- (void)setFGSLambda:(float)_lambda NS_SWIFT_NAME(setFGSLambda(_lambda:));
//
// float cv::ximgproc::EdgeAwareInterpolator::getFGSLambda()
//
/**
* @see `-setFGSLambda:`
*/
- (float)getFGSLambda NS_SWIFT_NAME(getFGSLambda());
//
// void cv::ximgproc::EdgeAwareInterpolator::setFGSSigma(float _sigma)
//
/**
* @see `-setFGSLambda:`
*/
- (void)setFGSSigma:(float)_sigma NS_SWIFT_NAME(setFGSSigma(_sigma:));
//
// float cv::ximgproc::EdgeAwareInterpolator::getFGSSigma()
//
/**
* @see `-setFGSLambda:`
*/
- (float)getFGSSigma NS_SWIFT_NAME(getFGSSigma());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,290 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/edgeboxes.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
@class Rect2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class EdgeBoxes
/**
* Class implementing EdgeBoxes algorithm from CITE: ZitnickECCV14edgeBoxes :
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface EdgeBoxes : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::EdgeBoxes> nativePtrEdgeBoxes;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::EdgeBoxes>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::EdgeBoxes>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::EdgeBoxes::getBoundingBoxes(Mat edge_map, Mat orientation_map, vector_Rect& boxes, Mat& scores = Mat())
//
/**
* Returns array containing proposal boxes.
*
* @param edge_map edge image.
* @param orientation_map orientation map.
* @param boxes proposal boxes.
* @param scores objectness scores of the proposal boxes, returned as a vector of floats.
*/
- (void)getBoundingBoxes:(Mat*)edge_map orientation_map:(Mat*)orientation_map boxes:(NSMutableArray<Rect2i*>*)boxes scores:(Mat*)scores NS_SWIFT_NAME(getBoundingBoxes(edge_map:orientation_map:boxes:scores:));
/**
* Returns array containing proposal boxes.
*
* @param edge_map edge image.
* @param orientation_map orientation map.
* @param boxes proposal boxes.
*/
- (void)getBoundingBoxes:(Mat*)edge_map orientation_map:(Mat*)orientation_map boxes:(NSMutableArray<Rect2i*>*)boxes NS_SWIFT_NAME(getBoundingBoxes(edge_map:orientation_map:boxes:));
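//
// A minimal proposal-generation sketch (hedged): `edgeBoxes` is assumed to come from the
// ximgproc factory (declared elsewhere), and `edgeMap`/`orientationMap` from a structured
// edge detector run beforehand.
//
//     [edgeBoxes setMaxBoxes:100];
//     NSMutableArray<Rect2i*> *boxes = [NSMutableArray array];
//     Mat *scores = [[Mat alloc] init];
//     [edgeBoxes getBoundingBoxes:edgeMap orientation_map:orientationMap boxes:boxes scores:scores];
//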
//
// float cv::ximgproc::EdgeBoxes::getAlpha()
//
/**
* Returns the step size of sliding window search.
*/
- (float)getAlpha NS_SWIFT_NAME(getAlpha());
//
// void cv::ximgproc::EdgeBoxes::setAlpha(float value)
//
/**
* Sets the step size of sliding window search.
*/
- (void)setAlpha:(float)value NS_SWIFT_NAME(setAlpha(value:));
//
// float cv::ximgproc::EdgeBoxes::getBeta()
//
/**
* Returns the nms threshold for object proposals.
*/
- (float)getBeta NS_SWIFT_NAME(getBeta());
//
// void cv::ximgproc::EdgeBoxes::setBeta(float value)
//
/**
* Sets the nms threshold for object proposals.
*/
- (void)setBeta:(float)value NS_SWIFT_NAME(setBeta(value:));
//
// float cv::ximgproc::EdgeBoxes::getEta()
//
/**
* Returns adaptation rate for nms threshold.
*/
- (float)getEta NS_SWIFT_NAME(getEta());
//
// void cv::ximgproc::EdgeBoxes::setEta(float value)
//
/**
* Sets the adaptation rate for nms threshold.
*/
- (void)setEta:(float)value NS_SWIFT_NAME(setEta(value:));
//
// float cv::ximgproc::EdgeBoxes::getMinScore()
//
/**
* Returns the min score of boxes to detect.
*/
- (float)getMinScore NS_SWIFT_NAME(getMinScore());
//
// void cv::ximgproc::EdgeBoxes::setMinScore(float value)
//
/**
* Sets the min score of boxes to detect.
*/
- (void)setMinScore:(float)value NS_SWIFT_NAME(setMinScore(value:));
//
// int cv::ximgproc::EdgeBoxes::getMaxBoxes()
//
/**
* Returns the max number of boxes to detect.
*/
- (int)getMaxBoxes NS_SWIFT_NAME(getMaxBoxes());
//
// void cv::ximgproc::EdgeBoxes::setMaxBoxes(int value)
//
/**
* Sets max number of boxes to detect.
*/
- (void)setMaxBoxes:(int)value NS_SWIFT_NAME(setMaxBoxes(value:));
//
// float cv::ximgproc::EdgeBoxes::getEdgeMinMag()
//
/**
* Returns the edge min magnitude.
*/
- (float)getEdgeMinMag NS_SWIFT_NAME(getEdgeMinMag());
//
// void cv::ximgproc::EdgeBoxes::setEdgeMinMag(float value)
//
/**
* Sets the edge min magnitude.
*/
- (void)setEdgeMinMag:(float)value NS_SWIFT_NAME(setEdgeMinMag(value:));
//
// float cv::ximgproc::EdgeBoxes::getEdgeMergeThr()
//
/**
* Returns the edge merge threshold.
*/
- (float)getEdgeMergeThr NS_SWIFT_NAME(getEdgeMergeThr());
//
// void cv::ximgproc::EdgeBoxes::setEdgeMergeThr(float value)
//
/**
* Sets the edge merge threshold.
*/
- (void)setEdgeMergeThr:(float)value NS_SWIFT_NAME(setEdgeMergeThr(value:));
//
// float cv::ximgproc::EdgeBoxes::getClusterMinMag()
//
/**
* Returns the cluster min magnitude.
*/
- (float)getClusterMinMag NS_SWIFT_NAME(getClusterMinMag());
//
// void cv::ximgproc::EdgeBoxes::setClusterMinMag(float value)
//
/**
* Sets the cluster min magnitude.
*/
- (void)setClusterMinMag:(float)value NS_SWIFT_NAME(setClusterMinMag(value:));
//
// float cv::ximgproc::EdgeBoxes::getMaxAspectRatio()
//
/**
* Returns the max aspect ratio of boxes.
*/
- (float)getMaxAspectRatio NS_SWIFT_NAME(getMaxAspectRatio());
//
// void cv::ximgproc::EdgeBoxes::setMaxAspectRatio(float value)
//
/**
* Sets the max aspect ratio of boxes.
*/
- (void)setMaxAspectRatio:(float)value NS_SWIFT_NAME(setMaxAspectRatio(value:));
//
// float cv::ximgproc::EdgeBoxes::getMinBoxArea()
//
/**
* Returns the minimum area of boxes.
*/
- (float)getMinBoxArea NS_SWIFT_NAME(getMinBoxArea());
//
// void cv::ximgproc::EdgeBoxes::setMinBoxArea(float value)
//
/**
* Sets the minimum area of boxes.
*/
- (void)setMinBoxArea:(float)value NS_SWIFT_NAME(setMinBoxArea(value:));
//
// float cv::ximgproc::EdgeBoxes::getGamma()
//
/**
* Returns the affinity sensitivity.
*/
- (float)getGamma NS_SWIFT_NAME(getGamma());
//
// void cv::ximgproc::EdgeBoxes::setGamma(float value)
//
/**
* Sets the affinity sensitivity
*/
- (void)setGamma:(float)value NS_SWIFT_NAME(setGamma(value:));
//
// float cv::ximgproc::EdgeBoxes::getKappa()
//
/**
* Returns the scale sensitivity.
*/
- (float)getKappa NS_SWIFT_NAME(getKappa());
//
// void cv::ximgproc::EdgeBoxes::setKappa(float value)
//
/**
* Sets the scale sensitivity.
*/
- (void)setKappa:(float)value NS_SWIFT_NAME(setKappa(value:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,149 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/edge_drawing.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class EdgeDrawingParams;
@class IntVector;
@class Mat;
@class Point2i;
// C++: enum GradientOperator (cv.ximgproc.EdgeDrawing.GradientOperator)
typedef NS_ENUM(int, GradientOperator) {
EdgeDrawing_PREWITT NS_SWIFT_NAME(PREWITT) = 0,
EdgeDrawing_SOBEL NS_SWIFT_NAME(SOBEL) = 1,
EdgeDrawing_SCHARR NS_SWIFT_NAME(SCHARR) = 2,
EdgeDrawing_LSD NS_SWIFT_NAME(LSD) = 3
};
NS_ASSUME_NONNULL_BEGIN
// C++: class EdgeDrawing
/**
* Class implementing the ED (EdgeDrawing) CITE: topal2012edge, EDLines CITE: akinlar2011edlines, EDPF CITE: akinlar2012edpf and EDCircles CITE: akinlar2013edcircles algorithms
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface EdgeDrawing : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::EdgeDrawing> nativePtrEdgeDrawing;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::EdgeDrawing>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::EdgeDrawing>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::EdgeDrawing::detectEdges(Mat src)
//
/**
* Detects edges in a grayscale image and prepares them to detect lines and ellipses.
*
* @param src 8-bit, single-channel, grayscale input image.
*/
- (void)detectEdges:(Mat*)src NS_SWIFT_NAME(detectEdges(src:));
//
// void cv::ximgproc::EdgeDrawing::getEdgeImage(Mat& dst)
//
/**
* Returns the edge image prepared by the detectEdges() function.
*
* @param dst returns an 8-bit, single-channel output image.
*/
- (void)getEdgeImage:(Mat*)dst NS_SWIFT_NAME(getEdgeImage(dst:));
//
// void cv::ximgproc::EdgeDrawing::getGradientImage(Mat& dst)
//
/**
* Returns the gradient image prepared by the detectEdges() function.
*
* @param dst returns a 16-bit, single-channel output image.
*/
- (void)getGradientImage:(Mat*)dst NS_SWIFT_NAME(getGradientImage(dst:));
//
// vector_vector_Point cv::ximgproc::EdgeDrawing::getSegments()
//
/**
* Returns std::vector<std::vector<Point>> of detected edge segments, see detectEdges()
*/
- (NSArray<NSArray<Point2i*>*>*)getSegments NS_SWIFT_NAME(getSegments());
//
// vector_int cv::ximgproc::EdgeDrawing::getSegmentIndicesOfLines()
//
/**
* Returns for each line found in detectLines() its edge segment index in getSegments()
*/
- (IntVector*)getSegmentIndicesOfLines NS_SWIFT_NAME(getSegmentIndicesOfLines());
//
// void cv::ximgproc::EdgeDrawing::detectLines(Mat& lines)
//
/**
* Detects lines.
*
* @param lines output Vec<4f> containing the start point and the end point of each detected line.
* NOTE: you should call detectEdges() before calling this function.
*/
- (void)detectLines:(Mat*)lines NS_SWIFT_NAME(detectLines(lines:));
//
// void cv::ximgproc::EdgeDrawing::detectEllipses(Mat& ellipses)
//
/**
* Detects circles and ellipses.
*
* @param ellipses output Vec<6d> containing the center point and perimeter for circles, and the center point, axes and angle for ellipses.
* NOTE: you should call detectEdges() before calling this function.
*/
- (void)detectEllipses:(Mat*)ellipses NS_SWIFT_NAME(detectEllipses(ellipses:));
//
// void cv::ximgproc::EdgeDrawing::setParams(EdgeDrawing_Params parameters)
//
/**
* Sets parameters.
*
* This function is meant for setting parameters from languages other than C++, such as Python.
* @param parameters Parameters of the algorithm
*/
- (void)setParams:(EdgeDrawingParams*)parameters NS_SWIFT_NAME(setParams(parameters:));
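//
// A minimal detection pipeline sketch (hedged): `ed` is assumed to be an EdgeDrawing
// instance obtained from the ximgproc factory (declared elsewhere) and `gray` an 8-bit
// single-channel Mat; the parameter values are illustrative.
//
//     EdgeDrawingParams *params = [[EdgeDrawingParams alloc] init];
//     params.MinPathLength = 10;
//     params.NFAValidation = YES;
//     [ed setParams:params];
//     [ed detectEdges:gray];
//     Mat *lines = [[Mat alloc] init];
//     Mat *ellipses = [[Mat alloc] init];
//     [ed detectLines:lines];
//     [ed detectEllipses:ellipses];
//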
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,134 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/edge_drawing.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
// C++: class Params
/**
* The Params module
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface EdgeDrawingParams : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::EdgeDrawing::Params> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::EdgeDrawing::Params>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::EdgeDrawing::Params>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::ximgproc::EdgeDrawing::Params::Params()
//
- (instancetype)init;
//
// C++: bool cv::ximgproc::EdgeDrawing::Params::PFmode
//
@property BOOL PFmode;
//
// C++: int cv::ximgproc::EdgeDrawing::Params::EdgeDetectionOperator
//
@property int EdgeDetectionOperator;
//
// C++: int cv::ximgproc::EdgeDrawing::Params::GradientThresholdValue
//
@property int GradientThresholdValue;
//
// C++: int cv::ximgproc::EdgeDrawing::Params::AnchorThresholdValue
//
@property int AnchorThresholdValue;
//
// C++: int cv::ximgproc::EdgeDrawing::Params::ScanInterval
//
@property int ScanInterval;
//
// C++: int cv::ximgproc::EdgeDrawing::Params::MinPathLength
//
@property int MinPathLength;
//
// C++: float cv::ximgproc::EdgeDrawing::Params::Sigma
//
@property float Sigma;
//
// C++: bool cv::ximgproc::EdgeDrawing::Params::SumFlag
//
@property BOOL SumFlag;
//
// C++: bool cv::ximgproc::EdgeDrawing::Params::NFAValidation
//
@property BOOL NFAValidation;
//
// C++: int cv::ximgproc::EdgeDrawing::Params::MinLineLength
//
@property int MinLineLength;
//
// C++: double cv::ximgproc::EdgeDrawing::Params::MaxDistanceBetweenTwoLines
//
@property double MaxDistanceBetweenTwoLines;
//
// C++: double cv::ximgproc::EdgeDrawing::Params::LineFitErrorThreshold
//
@property double LineFitErrorThreshold;
//
// C++: double cv::ximgproc::EdgeDrawing::Params::MaxErrorThreshold
//
@property double MaxErrorThreshold;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,144 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/facerec.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "BasicFaceRecognizer.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class EigenFaceRecognizer
/**
* The EigenFaceRecognizer module
*
* Member of `Face`
*/
CV_EXPORTS @interface EigenFaceRecognizer : BasicFaceRecognizer
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::EigenFaceRecognizer> nativePtrEigenFaceRecognizer;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::EigenFaceRecognizer>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::EigenFaceRecognizer>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_EigenFaceRecognizer cv::face::EigenFaceRecognizer::create(int num_components = 0, double threshold = DBL_MAX)
//
/**
* @param num_components The number of components (read: Eigenfaces) kept for this Principal
* Component Analysis. As a hint: There's no rule how many components (read: Eigenfaces) should be
* kept for good reconstruction capabilities. It is based on your input data, so experiment with the
* number. Keeping 80 components should almost always be sufficient.
* @param threshold The threshold applied in the prediction.
*
* ### Notes:
*
* - Training and prediction must be done on grayscale images, use cvtColor to convert between the
* color spaces.
* - **THE EIGENFACES METHOD MAKES THE ASSUMPTION, THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL
* SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your
* input data has the correct shape, else a meaningful exception is thrown. Use resize to resize
* the images.
* - This model does not support updating.
*
* ### Model internal data:
*
* - num_components see EigenFaceRecognizer::create.
* - threshold see EigenFaceRecognizer::create.
* - eigenvalues The eigenvalues for this Principal Component Analysis (ordered descending).
* - eigenvectors The eigenvectors for this Principal Component Analysis (ordered by their
* eigenvalue).
* - mean The sample mean calculated from the training data.
* - projections The projections of the training data.
* - labels The threshold applied in the prediction. If the distance to the nearest neighbor is
* larger than the threshold, this method returns -1.
*/
+ (EigenFaceRecognizer*)create:(int)num_components threshold:(double)threshold NS_SWIFT_NAME(create(num_components:threshold:));
/**
* @param num_components The number of components (read: Eigenfaces) kept for this Principal
* Component Analysis. As a hint: There's no rule how many components (read: Eigenfaces) should be
* kept for good reconstruction capabilities. It is based on your input data, so experiment with the
* number. Keeping 80 components should almost always be sufficient.
*
* ### Notes:
*
* - Training and prediction must be done on grayscale images, use cvtColor to convert between the
* color spaces.
* - **THE EIGENFACES METHOD MAKES THE ASSUMPTION, THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL
* SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your
* input data has the correct shape, else a meaningful exception is thrown. Use resize to resize
* the images.
* - This model does not support updating.
*
* ### Model internal data:
*
* - num_components see EigenFaceRecognizer::create.
* - threshold see EigenFaceRecognizer::create.
* - eigenvalues The eigenvalues for this Principal Component Analysis (ordered descending).
* - eigenvectors The eigenvectors for this Principal Component Analysis (ordered by their
* eigenvalue).
* - mean The sample mean calculated from the training data.
* - projections The projections of the training data.
* - labels The threshold applied in the prediction. If the distance to the nearest neighbor is
* larger than the threshold, this method returns -1.
*/
+ (EigenFaceRecognizer*)create:(int)num_components NS_SWIFT_NAME(create(num_components:));
/**
*
* ### Notes:
*
* - Training and prediction must be done on grayscale images, use cvtColor to convert between the
* color spaces.
* - **THE EIGENFACES METHOD MAKES THE ASSUMPTION, THAT THE TRAINING AND TEST IMAGES ARE OF EQUAL
* SIZE.** (caps-lock, because I got so many mails asking for this). You have to make sure your
* input data has the correct shape, else a meaningful exception is thrown. Use resize to resize
* the images.
* - This model does not support updating.
*
* ### Model internal data:
*
* - num_components see EigenFaceRecognizer::create.
* - threshold see EigenFaceRecognizer::create.
* - eigenvalues The eigenvalues for this Principal Component Analysis (ordered descending).
* - eigenvectors The eigenvectors for this Principal Component Analysis (ordered by their
* eigenvalue).
* - mean The sample mean calculated from the training data.
* - projections The projections of the training data.
* - labels The threshold applied in the prediction. If the distance to the nearest neighbor is
* larger than the threshold, this method returns -1.
*/
+ (EigenFaceRecognizer*)create NS_SWIFT_NAME(create());
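//
// A minimal creation sketch (hedged): training and prediction use the train/predict methods
// inherited from FaceRecognizer (declared in another header), and grayscale images of equal
// size are assumed, as the Eigenfaces method requires.
//
//     EigenFaceRecognizer *model = [EigenFaceRecognizer create:80 threshold:DBL_MAX];
//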
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,82 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/aruco.hpp"
#import "aruco/charuco.hpp"
#import "opencv2/aruco/aruco_calib.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Aruco.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class EstimateParameters
/**
* Pose estimation parameters
*
* pattern Defines the center of the coordinate system and the axes direction (default PatternPositionType::ARUCO_CCW_CENTER).
* useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided
* rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further
* optimizes them (default false).
* solvePnPMethod Method for solving a PnP problem: see REF: calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE).
* @see PatternPositionType, solvePnP(), check tutorial_aruco_detection in aruco contrib
*
* Member of `Aruco`
*/
CV_EXPORTS @interface EstimateParameters : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::aruco::EstimateParameters> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::aruco::EstimateParameters>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::aruco::EstimateParameters>)nativePtr;
#endif
#pragma mark - Methods
//
// cv::aruco::EstimateParameters::EstimateParameters()
//
- (instancetype)init;
//
// C++: PatternPositionType cv::aruco::EstimateParameters::pattern
//
@property PatternPositionType pattern;
//
// C++: bool cv::aruco::EstimateParameters::useExtrinsicGuess
//
@property BOOL useExtrinsicGuess;
//
// C++: int cv::aruco::EstimateParameters::solvePnPMethod
//
@property int solvePnPMethod;
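//
// A minimal configuration sketch (hedged): the numeric solvePnP flag is an assumed value
// for SOLVEPNP_ITERATIVE and is shown only as an illustration; the pattern property keeps
// its default.
//
//     EstimateParameters *estParams = [[EstimateParameters alloc] init];
//     estParams.useExtrinsicGuess = NO;
//     estParams.solvePnPMethod = 0;   // SOLVEPNP_ITERATIVE (assumed value)
//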
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,156 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/xfeatures2d.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Feature2D.h"
@class IntVector;
NS_ASSUME_NONNULL_BEGIN
// C++: class FREAK
/**
* Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in CITE: AOV12 .
*
* The algorithm proposes a novel keypoint descriptor inspired by the human visual system and more
* precisely the retina, coined Fast Retina Keypoint (FREAK). A cascade of binary strings is
* computed by efficiently comparing image intensities over a retinal sampling pattern. FREAKs are in
* general faster to compute, have a lower memory load, and are more robust than SIFT, SURF or BRISK.
* They are competitive alternatives to existing keypoints, in particular for embedded applications.
*
* NOTE:
* - An example on how to use the FREAK descriptor can be found at
* opencv_source_code/samples/cpp/freak_demo.cpp
*
* Member of `Xfeatures2d`
*/
CV_EXPORTS @interface FREAK : Feature2D
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::xfeatures2d::FREAK> nativePtrFREAK;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::xfeatures2d::FREAK>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::xfeatures2d::FREAK>)nativePtr;
#endif
#pragma mark - Methods
//
// static Ptr_FREAK cv::xfeatures2d::FREAK::create(bool orientationNormalized = true, bool scaleNormalized = true, float patternScale = 22.0f, int nOctaves = 4, vector_int selectedPairs = std::vector<int>())
//
/**
* @param orientationNormalized Enable orientation normalization.
* @param scaleNormalized Enable scale normalization.
* @param patternScale Scaling of the description pattern.
* @param nOctaves Number of octaves covered by the detected keypoints.
* @param selectedPairs (Optional) user-defined indexes of selected pairs.
*/
+ (FREAK*)create:(BOOL)orientationNormalized scaleNormalized:(BOOL)scaleNormalized patternScale:(float)patternScale nOctaves:(int)nOctaves selectedPairs:(IntVector*)selectedPairs NS_SWIFT_NAME(create(orientationNormalized:scaleNormalized:patternScale:nOctaves:selectedPairs:));
/**
* @param orientationNormalized Enable orientation normalization.
* @param scaleNormalized Enable scale normalization.
* @param patternScale Scaling of the description pattern.
* @param nOctaves Number of octaves covered by the detected keypoints.
*/
+ (FREAK*)create:(BOOL)orientationNormalized scaleNormalized:(BOOL)scaleNormalized patternScale:(float)patternScale nOctaves:(int)nOctaves NS_SWIFT_NAME(create(orientationNormalized:scaleNormalized:patternScale:nOctaves:));
/**
* @param orientationNormalized Enable orientation normalization.
* @param scaleNormalized Enable scale normalization.
* @param patternScale Scaling of the description pattern.
*/
+ (FREAK*)create:(BOOL)orientationNormalized scaleNormalized:(BOOL)scaleNormalized patternScale:(float)patternScale NS_SWIFT_NAME(create(orientationNormalized:scaleNormalized:patternScale:));
/**
* @param orientationNormalized Enable orientation normalization.
* @param scaleNormalized Enable scale normalization.
*/
+ (FREAK*)create:(BOOL)orientationNormalized scaleNormalized:(BOOL)scaleNormalized NS_SWIFT_NAME(create(orientationNormalized:scaleNormalized:));
/**
* @param orientationNormalized Enable orientation normalization.
*/
+ (FREAK*)create:(BOOL)orientationNormalized NS_SWIFT_NAME(create(orientationNormalized:));
/**
*/
+ (FREAK*)create NS_SWIFT_NAME(create());
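//
// A minimal setup sketch (hedged): keypoints are assumed to come from a separate detector,
// and descriptors are computed with the compute method inherited from Feature2D (declared
// in another header).
//
//     FREAK *freak = [FREAK create:YES scaleNormalized:YES patternScale:22.0f nOctaves:4];
//     // [freak compute:image keypoints:keypoints descriptors:descriptors];  // assumed base-class API
//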
//
// void cv::xfeatures2d::FREAK::setOrientationNormalized(bool orientationNormalized)
//
- (void)setOrientationNormalized:(BOOL)orientationNormalized NS_SWIFT_NAME(setOrientationNormalized(orientationNormalized:));
//
// bool cv::xfeatures2d::FREAK::getOrientationNormalized()
//
- (BOOL)getOrientationNormalized NS_SWIFT_NAME(getOrientationNormalized());
//
// void cv::xfeatures2d::FREAK::setScaleNormalized(bool scaleNormalized)
//
- (void)setScaleNormalized:(BOOL)scaleNormalized NS_SWIFT_NAME(setScaleNormalized(scaleNormalized:));
//
// bool cv::xfeatures2d::FREAK::getScaleNormalized()
//
- (BOOL)getScaleNormalized NS_SWIFT_NAME(getScaleNormalized());
//
// void cv::xfeatures2d::FREAK::setPatternScale(double patternScale)
//
- (void)setPatternScale:(double)patternScale NS_SWIFT_NAME(setPatternScale(patternScale:));
//
// double cv::xfeatures2d::FREAK::getPatternScale()
//
- (double)getPatternScale NS_SWIFT_NAME(getPatternScale());
//
// void cv::xfeatures2d::FREAK::setNOctaves(int nOctaves)
//
- (void)setNOctaves:(int)nOctaves NS_SWIFT_NAME(setNOctaves(nOctaves:));
//
// int cv::xfeatures2d::FREAK::getNOctaves()
//
- (int)getNOctaves NS_SWIFT_NAME(getNOctaves());
//
// String cv::xfeatures2d::FREAK::getDefaultName()
//
- (NSString*)getDefaultName NS_SWIFT_NAME(getDefaultName());
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,399 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Facemark;
@class Mat;
@class Point2f;
@class Scalar;
NS_ASSUME_NONNULL_BEGIN
// C++: class Face
/**
* The Face module
*
* Member classes: `FaceRecognizer`, `Facemark`, `PredictCollector`, `StandardCollector`, `FacemarkKazemi`, `FacemarkAAM`, `BIF`, `MACE`, `FacemarkTrain`, `FacemarkLBF`, `BasicFaceRecognizer`, `EigenFaceRecognizer`, `FisherFaceRecognizer`, `LBPHFaceRecognizer`
*
*/
CV_EXPORTS @interface Face : NSObject
#pragma mark - Methods
//
// Ptr_Facemark cv::face::createFacemarkAAM()
//
+ (Facemark*)createFacemarkAAM NS_SWIFT_NAME(createFacemarkAAM());
//
// Ptr_Facemark cv::face::createFacemarkLBF()
//
+ (Facemark*)createFacemarkLBF NS_SWIFT_NAME(createFacemarkLBF());
//
// Ptr_Facemark cv::face::createFacemarkKazemi()
//
+ (Facemark*)createFacemarkKazemi NS_SWIFT_NAME(createFacemarkKazemi());
//
// bool cv::face::getFacesHAAR(Mat image, Mat& faces, String face_cascade_name)
//
/**
* Default face detector.
* This function is mainly utilized by the implementation of a Facemark algorithm.
* End users are advised to use Facemark::getFaces, whose face detector can be manually defined
* and passed to the algorithm via Facemark::setFaceDetector.
*
* @param image The input image to be processed.
* @param faces Output of the function, which represents the regions of interest of the detected faces.
* Each face is stored in a cv::Rect container.
*
* <B>Example of usage</B>
*
* std::vector<cv::Rect> faces;
* CParams params("haarcascade_frontalface_alt.xml");
* cv::face::getFaces(frame, faces, &params);
* for(int j=0;j<faces.size();j++){
* cv::rectangle(frame, faces[j], cv::Scalar(255,0,255));
* }
* cv::imshow("detection", frame);
*
*/
+ (BOOL)getFacesHAAR:(Mat*)image faces:(Mat*)faces face_cascade_name:(NSString*)face_cascade_name NS_SWIFT_NAME(getFacesHAAR(image:faces:face_cascade_name:));
//
// bool cv::face::loadDatasetList(String imageList, String annotationList, vector_String images, vector_String annotations)
//
/**
* A utility to load the list of paths to the training images and annotation files.
* @param imageList The specified file contains paths to the training images.
* @param annotationList The specified file contains paths to the training annotations.
* @param images The loaded paths of training images.
* @param annotations The loaded paths of annotation files.
*
* Example of usage:
*
* String imageFiles = "images_path.txt";
* String ptsFiles = "annotations_path.txt";
* std::vector<String> images_train;
* std::vector<String> landmarks_train;
* loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
*
*/
+ (BOOL)loadDatasetList:(NSString*)imageList annotationList:(NSString*)annotationList images:(NSArray<NSString*>*)images annotations:(NSArray<NSString*>*)annotations NS_SWIFT_NAME(loadDatasetList(imageList:annotationList:images:annotations:));
//
// bool cv::face::loadTrainingData(String filename, vector_String images, Mat& facePoints, char delim = ' ', float offset = 0.0f)
//
/**
* A utility to load facial landmark dataset from a single file.
*
* @param filename The filename of a file that contains the dataset information.
* Each line contains the filename of an image followed by
* pairs of x and y values of facial landmarks points separated by a space.
* Example
*
* /home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
* /home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
*
* @param images A vector where each element represents the filename of an image in the dataset.
* Images are not loaded by default to save memory.
* @param facePoints The loaded landmark points for all training data.
* @param delim Delimiter between each element, the default value is a whitespace.
* @param offset An offset value to adjust the loaded points.
*
* <B>Example of usage</B>
*
* cv::String imageFiles = "../data/images_train.txt";
* cv::String ptsFiles = "../data/points_train.txt";
* std::vector<String> images;
* std::vector<std::vector<Point2f> > facePoints;
* loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
*
*/
+ (BOOL)loadTrainingData:(NSString*)filename images:(NSArray<NSString*>*)images facePoints:(Mat*)facePoints delim:(char)delim offset:(float)offset NS_SWIFT_NAME(loadTrainingData(filename:images:facePoints:delim:offset:));
/**
* A utility to load facial landmark dataset from a single file.
*
* @param filename The filename of a file that contains the dataset information.
* Each line contains the filename of an image followed by
* pairs of x and y values of facial landmarks points separated by a space.
* Example
*
* /home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
* /home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
*
* @param images A vector where each element represents the filename of an image in the dataset.
* Images are not loaded by default to save memory.
* @param facePoints The loaded landmark points for all training data.
* @param delim Delimiter between each element, the default value is a whitespace.
*
* <B>Example of usage</B>
*
* cv::String imageFiles = "../data/images_train.txt";
* cv::String ptsFiles = "../data/points_train.txt";
* std::vector<String> images;
* std::vector<std::vector<Point2f> > facePoints;
* loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
*
*/
+ (BOOL)loadTrainingData:(NSString*)filename images:(NSArray<NSString*>*)images facePoints:(Mat*)facePoints delim:(char)delim NS_SWIFT_NAME(loadTrainingData(filename:images:facePoints:delim:));
/**
* A utility to load facial landmark dataset from a single file.
*
* @param filename The filename of a file that contains the dataset information.
* Each line contains the filename of an image followed by
* pairs of x and y values of facial landmarks points separated by a space.
* Example
*
* /home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
* /home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
*
* @param images A vector where each element represents the filename of an image in the dataset.
* Images are not loaded by default to save memory.
* @param facePoints The loaded landmark points for all training data.
*
* <B>Example of usage</B>
*
* cv::String imageFiles = "../data/images_train.txt";
* cv::String ptsFiles = "../data/points_train.txt";
* std::vector<String> images;
* std::vector<std::vector<Point2f> > facePoints;
* loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
*
*/
+ (BOOL)loadTrainingData:(NSString*)filename images:(NSArray<NSString*>*)images facePoints:(Mat*)facePoints NS_SWIFT_NAME(loadTrainingData(filename:images:facePoints:));
//
// bool cv::face::loadTrainingData(String imageList, String groundTruth, vector_String images, Mat& facePoints, float offset = 0.0f)
//
/**
* A utility to load facial landmark information from the dataset.
*
* @param imageList A file contains the list of image filenames in the training dataset.
* @param groundTruth A file contains the list of filenames
* where the landmarks points information are stored.
* The content in each file should follow the standard format (see face::loadFacePoints).
* @param images A vector where each element represents the filename of an image in the dataset.
* Images are not loaded by default to save memory.
* @param facePoints The loaded landmark points for all training data.
* @param offset An offset value to adjust the loaded points.
*
* <B>Example of usage</B>
*
* cv::String imageFiles = "../data/images_train.txt";
* cv::String ptsFiles = "../data/points_train.txt";
* std::vector<String> images;
* std::vector<std::vector<Point2f> > facePoints;
* loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
*
*
* example of content in the images_train.txt
*
* /home/user/ibug/image_003_1.jpg
* /home/user/ibug/image_004_1.jpg
* /home/user/ibug/image_005_1.jpg
* /home/user/ibug/image_006.jpg
*
*
* example of content in the points_train.txt
*
* /home/user/ibug/image_003_1.pts
* /home/user/ibug/image_004_1.pts
* /home/user/ibug/image_005_1.pts
* /home/user/ibug/image_006.pts
*
*/
+ (BOOL)loadTrainingData:(NSString*)imageList groundTruth:(NSString*)groundTruth images:(NSArray<NSString*>*)images facePoints:(Mat*)facePoints offset:(float)offset NS_SWIFT_NAME(loadTrainingData(imageList:groundTruth:images:facePoints:offset:));
/**
* A utility to load facial landmark information from the dataset.
*
* @param imageList A file contains the list of image filenames in the training dataset.
* @param groundTruth A file contains the list of filenames
* where the landmarks points information are stored.
* The content in each file should follow the standard format (see face::loadFacePoints).
* @param images A vector where each element represents the filename of an image in the dataset.
* Images are not loaded by default to save memory.
* @param facePoints The loaded landmark points for all training data.
*
* <B>Example of usage</B>
*
* cv::String imageFiles = "../data/images_train.txt";
* cv::String ptsFiles = "../data/points_train.txt";
* std::vector<String> images;
* std::vector<std::vector<Point2f> > facePoints;
* loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
*
*
* example of content in the images_train.txt
*
* /home/user/ibug/image_003_1.jpg
* /home/user/ibug/image_004_1.jpg
* /home/user/ibug/image_005_1.jpg
* /home/user/ibug/image_006.jpg
*
*
* example of content in the points_train.txt
*
* /home/user/ibug/image_003_1.pts
* /home/user/ibug/image_004_1.pts
* /home/user/ibug/image_005_1.pts
* /home/user/ibug/image_006.pts
*
*/
+ (BOOL)loadTrainingData:(NSString*)imageList groundTruth:(NSString*)groundTruth images:(NSArray<NSString*>*)images facePoints:(Mat*)facePoints NS_SWIFT_NAME(loadTrainingData(imageList:groundTruth:images:facePoints:));
//
// bool cv::face::loadTrainingData(vector_String filename, vector_vector_Point2f trainlandmarks, vector_String trainimages)
//
/**
* This function extracts the training data from .txt files which contain the corresponding image name and landmarks.
* The first line of each file should give the path of the image whose
* landmarks are described in that file. The subsequent lines should
* contain the coordinates of the landmarks in the image,
* i.e. each line should be of the form x,y,
* where x represents the x coordinate of the landmark and y represents
* the y coordinate of the landmark.
*
* For reference you can see the files as provided in the
* <a href="http://www.ifp.illinois.edu/~vuongle2/helen/">HELEN dataset</a>
*
* @param filename A vector of type cv::String containing name of the .txt files.
* @param trainlandmarks A vector of type cv::Point2f that would store shape or landmarks of all images.
* @param trainimages A vector of type cv::String which stores the names of the images whose landmarks are tracked.
* @return A boolean value: true when the data is read successfully, false otherwise.
*/
+ (BOOL)loadTrainingData:(NSArray<NSString*>*)filename trainlandmarks:(NSArray<NSArray<Point2f*>*>*)trainlandmarks trainimages:(NSArray<NSString*>*)trainimages NS_SWIFT_NAME(loadTrainingData(filename:trainlandmarks:trainimages:));
//
// bool cv::face::loadFacePoints(String filename, Mat& points, float offset = 0.0f)
//
/**
* A utility to load facial landmark information from a given file.
*
* @param filename The filename of the file that contains the facial landmark data.
* @param points The loaded facial landmark points.
* @param offset An offset value to adjust the loaded points.
*
* <B>Example of usage</B>
*
* std::vector<Point2f> points;
* face::loadFacePoints("filename.txt", points, 0.0f);
*
*
* The annotation file should follow the default format which is
*
* version: 1
* n_points: 68
* {
* 212.716603 499.771793
* 230.232816 566.290071
* ...
* }
*
* where n_points is the number of points considered
* and each point is represented as its position in x and y.
*/
+ (BOOL)loadFacePoints:(NSString*)filename points:(Mat*)points offset:(float)offset NS_SWIFT_NAME(loadFacePoints(filename:points:offset:));
/**
* A utility to load facial landmark information from a given file.
*
* @param filename The filename of the file that contains the facial landmark data.
* @param points The loaded facial landmark points.
*
* <B>Example of usage</B>
*
* std::vector<Point2f> points;
* face::loadFacePoints("filename.txt", points, 0.0f);
*
*
* The annotation file should follow the default format which is
*
* version: 1
* n_points: 68
* {
* 212.716603 499.771793
* 230.232816 566.290071
* ...
* }
*
* where n_points is the number of points considered
* and each point is represented as its position in x and y.
*/
+ (BOOL)loadFacePoints:(NSString*)filename points:(Mat*)points NS_SWIFT_NAME(loadFacePoints(filename:points:));
//
// void cv::face::drawFacemarks(Mat& image, Mat points, Scalar color = Scalar(255,0,0))
//
/**
* Utility to draw the detected facial landmark points
*
* @param image The input image to be processed.
* @param points Contains the data of points which will be drawn.
* @param color The color of points in BGR format represented by cv::Scalar.
*
* <B>Example of usage</B>
*
* std::vector<Rect> faces;
* std::vector<std::vector<Point2f> > landmarks;
* facemark->getFaces(img, faces);
* facemark->fit(img, faces, landmarks);
* for(int j=0;j<rects.size();j++){
* face::drawFacemarks(frame, landmarks[j], Scalar(0,0,255));
* }
*
*/
+ (void)drawFacemarks:(Mat*)image points:(Mat*)points color:(Scalar*)color NS_SWIFT_NAME(drawFacemarks(image:points:color:));
/**
* Utility to draw the detected facial landmark points
*
* @param image The input image to be processed.
* @param points Contains the data of points which will be drawn.
*
* <B>Example of usage</B>
*
* std::vector<Rect> faces;
* std::vector<std::vector<Point2f> > landmarks;
* facemark->getFaces(img, faces);
* facemark->fit(img, faces, landmarks);
* for(int j=0;j<rects.size();j++){
* face::drawFacemarks(frame, landmarks[j], Scalar(0,0,255));
* }
*
*/
+ (void)drawFacemarks:(Mat*)image points:(Mat*)points NS_SWIFT_NAME(drawFacemarks(image:points:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,296 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/face.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class ByteVector;
@class Mat;
@class Size2i;
NS_ASSUME_NONNULL_BEGIN
// C++: class FaceDetectorYN
/**
* DNN-based face detector
*
* model download link: https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface FaceDetectorYN : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::FaceDetectorYN> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::FaceDetectorYN>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::FaceDetectorYN>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::FaceDetectorYN::setInputSize(Size input_size)
//
/**
* Set the size of the network input. This overrides the input size given when the model was created. Call this method when the size of the input image does not match the input size used to create the model.
*
* @param input_size the size of the input image
*/
- (void)setInputSize:(Size2i*)input_size NS_SWIFT_NAME(setInputSize(input_size:));
//
// Size cv::FaceDetectorYN::getInputSize()
//
- (Size2i*)getInputSize NS_SWIFT_NAME(getInputSize());
//
// void cv::FaceDetectorYN::setScoreThreshold(float score_threshold)
//
/**
* Set the score threshold to filter out bounding boxes of score less than the given value
*
* @param score_threshold threshold for filtering out bounding boxes
*/
- (void)setScoreThreshold:(float)score_threshold NS_SWIFT_NAME(setScoreThreshold(score_threshold:));
//
// float cv::FaceDetectorYN::getScoreThreshold()
//
- (float)getScoreThreshold NS_SWIFT_NAME(getScoreThreshold());
//
// void cv::FaceDetectorYN::setNMSThreshold(float nms_threshold)
//
/**
* Set the Non-maximum-suppression threshold to suppress bounding boxes that have IoU greater than the given value
*
* @param nms_threshold threshold for NMS operation
*/
- (void)setNMSThreshold:(float)nms_threshold NS_SWIFT_NAME(setNMSThreshold(nms_threshold:));
//
// float cv::FaceDetectorYN::getNMSThreshold()
//
- (float)getNMSThreshold NS_SWIFT_NAME(getNMSThreshold());
//
// void cv::FaceDetectorYN::setTopK(int top_k)
//
/**
* Set the number of bounding boxes preserved before NMS
*
* @param top_k the number of bounding boxes to preserve from top rank based on score
*/
- (void)setTopK:(int)top_k NS_SWIFT_NAME(setTopK(top_k:));
//
// int cv::FaceDetectorYN::getTopK()
//
- (int)getTopK NS_SWIFT_NAME(getTopK());
//
// int cv::FaceDetectorYN::detect(Mat image, Mat& faces)
//
/**
* Detects faces in the input image. Following is an example output.
*
* ![image](pics/lena-face-detection.jpg)
*
 * @param image an image in which to detect faces
 * @param faces detection results stored in a 2D cv::Mat of shape [num_faces, 15], laid out as follows:
* - 0-1: x, y of bbox top left corner
* - 2-3: width, height of bbox
* - 4-5: x, y of right eye (blue point in the example image)
* - 6-7: x, y of left eye (red point in the example image)
* - 8-9: x, y of nose tip (green point in the example image)
* - 10-11: x, y of right corner of mouth (pink point in the example image)
* - 12-13: x, y of left corner of mouth (yellow point in the example image)
* - 14: face score
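 *
 * A minimal C++ sketch of the full detection loop (the model path and image name are placeholders):
 *
 * Ptr<FaceDetectorYN> detector = FaceDetectorYN::create("face_detection_yunet.onnx", "", Size(320, 320));
 * Mat img = imread("portrait.jpg");
 * detector->setInputSize(Size(img.cols, img.rows));
 * Mat faces;
 * detector->detect(img, faces);
 * for (int i = 0; i < faces.rows; i++)
 * {
 *     Rect box(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)),
 *              int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3)));
 *     float score = faces.at<float>(i, 14);   // overall face score
 *     rectangle(img, box, Scalar(0, 255, 0), 2);
 * }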
*/
- (int)detect:(Mat*)image faces:(Mat*)faces NS_SWIFT_NAME(detect(image:faces:));
//
// static Ptr_FaceDetectorYN cv::FaceDetectorYN::create(String model, String config, Size input_size, float score_threshold = 0.9f, float nms_threshold = 0.3f, int top_k = 5000, int backend_id = 0, int target_id = 0)
//
/**
* Creates an instance of face detector class with given parameters
*
* @param model the path to the requested model
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
* @param top_k keep top K bboxes before NMS
* @param backend_id the id of backend
* @param target_id the id of target device
*/
+ (FaceDetectorYN*)create:(NSString*)model config:(NSString*)config input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold top_k:(int)top_k backend_id:(int)backend_id target_id:(int)target_id NS_SWIFT_NAME(create(model:config:input_size:score_threshold:nms_threshold:top_k:backend_id:target_id:));
/**
* Creates an instance of face detector class with given parameters
*
* @param model the path to the requested model
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
* @param top_k keep top K bboxes before NMS
* @param backend_id the id of backend
*/
+ (FaceDetectorYN*)create:(NSString*)model config:(NSString*)config input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold top_k:(int)top_k backend_id:(int)backend_id NS_SWIFT_NAME(create(model:config:input_size:score_threshold:nms_threshold:top_k:backend_id:));
/**
* Creates an instance of face detector class with given parameters
*
* @param model the path to the requested model
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
* @param top_k keep top K bboxes before NMS
*/
+ (FaceDetectorYN*)create:(NSString*)model config:(NSString*)config input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold top_k:(int)top_k NS_SWIFT_NAME(create(model:config:input_size:score_threshold:nms_threshold:top_k:));
/**
* Creates an instance of face detector class with given parameters
*
* @param model the path to the requested model
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
*/
+ (FaceDetectorYN*)create:(NSString*)model config:(NSString*)config input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold NS_SWIFT_NAME(create(model:config:input_size:score_threshold:nms_threshold:));
/**
* Creates an instance of face detector class with given parameters
*
* @param model the path to the requested model
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
*/
+ (FaceDetectorYN*)create:(NSString*)model config:(NSString*)config input_size:(Size2i*)input_size score_threshold:(float)score_threshold NS_SWIFT_NAME(create(model:config:input_size:score_threshold:));
/**
* Creates an instance of face detector class with given parameters
*
* @param model the path to the requested model
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param input_size the size of the input image
*/
+ (FaceDetectorYN*)create:(NSString*)model config:(NSString*)config input_size:(Size2i*)input_size NS_SWIFT_NAME(create(model:config:input_size:));
//
// static Ptr_FaceDetectorYN cv::FaceDetectorYN::create(String framework, vector_uchar bufferModel, vector_uchar bufferConfig, Size input_size, float score_threshold = 0.9f, float nms_threshold = 0.3f, int top_k = 5000, int backend_id = 0, int target_id = 0)
//
/**
 * Creates an instance of face detector class from memory buffers containing the model weights and configuration.
 *
 * @param framework Name of the origin framework
 * @param bufferModel A buffer containing the binary model weights
 * @param bufferConfig A buffer containing the text of the network configuration
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
* @param top_k keep top K bboxes before NMS
* @param backend_id the id of backend
* @param target_id the id of target device
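 *
 * A sketch of loading the ONNX model from memory (the file name and input size are placeholders):
 *
 * std::ifstream f("face_detection_yunet.onnx", std::ios::binary);
 * std::vector<uchar> bufferModel((std::istreambuf_iterator<char>(f)), std::istreambuf_iterator<char>());
 * Ptr<FaceDetectorYN> detector = FaceDetectorYN::create("onnx", bufferModel, std::vector<uchar>(), Size(320, 320));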
*/
+ (FaceDetectorYN*)create:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold top_k:(int)top_k backend_id:(int)backend_id target_id:(int)target_id NS_SWIFT_NAME(create(framework:bufferModel:bufferConfig:input_size:score_threshold:nms_threshold:top_k:backend_id:target_id:));
/**
 * Creates an instance of face detector class from memory buffers containing the model weights and configuration.
 *
 * @param framework Name of the origin framework
 * @param bufferModel A buffer containing the binary model weights
 * @param bufferConfig A buffer containing the text of the network configuration
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
* @param top_k keep top K bboxes before NMS
* @param backend_id the id of backend
*/
+ (FaceDetectorYN*)create:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold top_k:(int)top_k backend_id:(int)backend_id NS_SWIFT_NAME(create(framework:bufferModel:bufferConfig:input_size:score_threshold:nms_threshold:top_k:backend_id:));
/**
 * Creates an instance of face detector class from memory buffers containing the model weights and configuration.
 *
 * @param framework Name of the origin framework
 * @param bufferModel A buffer containing the binary model weights
 * @param bufferConfig A buffer containing the text of the network configuration
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
* @param top_k keep top K bboxes before NMS
*/
+ (FaceDetectorYN*)create:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold top_k:(int)top_k NS_SWIFT_NAME(create(framework:bufferModel:bufferConfig:input_size:score_threshold:nms_threshold:top_k:));
/**
 * Creates an instance of face detector class from memory buffers containing the model weights and configuration.
 *
 * @param framework Name of the origin framework
 * @param bufferModel A buffer containing the binary model weights
 * @param bufferConfig A buffer containing the text of the network configuration
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
* @param nms_threshold the threshold to suppress bounding boxes of IoU bigger than the given value
*/
+ (FaceDetectorYN*)create:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig input_size:(Size2i*)input_size score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold NS_SWIFT_NAME(create(framework:bufferModel:bufferConfig:input_size:score_threshold:nms_threshold:));
/**
 * Creates an instance of face detector class from memory buffers containing the model weights and configuration.
 *
 * @param framework Name of the origin framework
 * @param bufferModel A buffer containing the binary model weights
 * @param bufferConfig A buffer containing the text of the network configuration
* @param input_size the size of the input image
* @param score_threshold the threshold to filter out bounding boxes of score smaller than the given value
*/
+ (FaceDetectorYN*)create:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig input_size:(Size2i*)input_size score_threshold:(float)score_threshold NS_SWIFT_NAME(create(framework:bufferModel:bufferConfig:input_size:score_threshold:));
/**
 * Creates an instance of face detector class from memory buffers containing the model weights and configuration.
 *
 * @param framework Name of the origin framework
 * @param bufferModel A buffer containing the binary model weights
 * @param bufferConfig A buffer containing the text of the network configuration
* @param input_size the size of the input image
*/
+ (FaceDetectorYN*)create:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig input_size:(Size2i*)input_size NS_SWIFT_NAME(create(framework:bufferModel:bufferConfig:input_size:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,386 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class IntVector;
@class Mat;
@class PredictCollector;
NS_ASSUME_NONNULL_BEGIN
// C++: class FaceRecognizer
/**
* Abstract base class for all face recognition models
*
* All face recognition models in OpenCV are derived from the abstract base class FaceRecognizer, which
 * provides a unified access to all face recognition algorithms in OpenCV.
*
* ### Description
*
* I'll go a bit more into detail explaining FaceRecognizer, because it doesn't look like a powerful
* interface at first sight. But: Every FaceRecognizer is an Algorithm, so you can easily get/set all
 * model internals (if allowed by the implementation). Algorithm is a relatively new OpenCV concept,
 * which has been available since the 2.4 release. I suggest you take a look at its description.
*
* Algorithm provides the following features for all derived classes:
*
* - So called "virtual constructor". That is, each Algorithm derivative is registered at program
* start and you can get the list of registered algorithms and create instance of a particular
* algorithm by its name (see Algorithm::create). If you plan to add your own algorithms, it is
* good practice to add a unique prefix to your algorithms to distinguish them from other
* algorithms.
* - Setting/Retrieving algorithm parameters by name. If you used video capturing functionality from
 * OpenCV highgui module, you are probably familiar with cv::cvSetCaptureProperty,
 * cv::cvGetCaptureProperty, VideoCapture::set and VideoCapture::get. Algorithm provides similar
* method where instead of integer id's you specify the parameter names as text Strings. See
* Algorithm::set and Algorithm::get for details.
* - Reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store
* all its parameters and then read them back. There is no need to re-implement it each time.
*
* Moreover every FaceRecognizer supports the:
*
* - **Training** of a FaceRecognizer with FaceRecognizer::train on a given set of images (your face
* database!).
* - **Prediction** of a given sample image, that means a face. The image is given as a Mat.
* - **Loading/Saving** the model state from/to a given XML or YAML.
* - **Setting/Getting labels info**, that is stored as a string. String labels info is useful for
* keeping names of the recognized people.
*
* NOTE: When using the FaceRecognizer interface in combination with Python, please stick to Python 2.
 * Some underlying scripts like create_csv will not work in other versions, like Python 3.
 *
 * ### Setting the Thresholds
 *
* Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common
* scenario in face recognition is to tell, whether a face belongs to the training dataset or if it is
* unknown. You might wonder, why there's no public API in FaceRecognizer to set the threshold for the
* prediction, but rest assured: It's supported. It just means there's no generic way in an abstract
* class to provide an interface for setting/getting the thresholds of *every possible* FaceRecognizer
* algorithm. The appropriate place to set the thresholds is in the constructor of the specific
 * FaceRecognizer and since every FaceRecognizer is an Algorithm (see above), you can get/set the
* thresholds at runtime!
*
* Here is an example of setting a threshold for the Eigenfaces method, when creating the model:
*
*
* // Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0
* int num_components = 10;
* double threshold = 10.0;
* // Then if you want to have a cv::FaceRecognizer with a confidence threshold,
* // create the concrete implementation with the appropriate parameters:
* Ptr<FaceRecognizer> model = EigenFaceRecognizer::create(num_components, threshold);
*
*
* Sometimes it's impossible to train the model, just to experiment with threshold values. Thanks to
* Algorithm it's possible to set internal model thresholds during runtime. Let's see how we would
* set/get the prediction for the Eigenface model, we've created above:
*
*
* // The following line reads the threshold from the Eigenfaces model:
* double current_threshold = model->getDouble("threshold");
* // And this line sets the threshold to 0.0:
* model->set("threshold", 0.0);
*
*
* If you've set the threshold to 0.0 as we did above, then:
*
*
* //
* Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
* // Get a prediction from the model. Note: We've set a threshold of 0.0 above,
* // since the distance is almost always larger than 0.0, you'll get -1 as
 * // label, which indicates that this face is unknown
* int predicted_label = model->predict(img);
* // ...
*
*
* is going to yield -1 as predicted label, which states this face is unknown.
*
* ### Getting the name of a FaceRecognizer
*
 * Since every FaceRecognizer is an Algorithm, you can use Algorithm::name to get the name of a
* FaceRecognizer:
*
*
* // Create a FaceRecognizer:
* Ptr<FaceRecognizer> model = EigenFaceRecognizer::create();
* // And here's how to get its name:
* String name = model->name();
*
*
* Member of `Face`
*/
CV_EXPORTS @interface FaceRecognizer : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::FaceRecognizer> nativePtrFaceRecognizer;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::FaceRecognizer>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::FaceRecognizer>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::face::FaceRecognizer::train(vector_Mat src, Mat labels)
//
/**
* Trains a FaceRecognizer with given data and associated labels.
*
* @param src The training images, that means the faces you want to learn. The data has to be
* given as a vector\<Mat\>.
* @param labels The labels corresponding to the images have to be given either as a vector\<int\>
* or a Mat of type CV_32SC1.
*
* The following source code snippet shows you how to learn a Fisherfaces model on a given set of
* images. The images are read with imread and pushed into a std::vector\<Mat\>. The labels of each
* image are stored within a std::vector\<int\> (you could also use a Mat of type CV_32SC1). Think of
* the label as the subject (the person) this image belongs to, so same subjects (persons) should have
* the same label. For the available FaceRecognizer you don't have to pay any attention to the order of
* the labels, just make sure same persons have the same label:
*
*
* // holds images and labels
* vector<Mat> images;
* vector<int> labels;
* // using Mat of type CV_32SC1
* // Mat labels(number_of_samples, 1, CV_32SC1);
* // images for first person
* images.push_back(imread("person0/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
* images.push_back(imread("person0/1.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
* images.push_back(imread("person0/2.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
* // images for second person
* images.push_back(imread("person1/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
* images.push_back(imread("person1/1.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
* images.push_back(imread("person1/2.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
*
*
* Now that you have read some images, we can create a new FaceRecognizer. In this example I'll create
* a Fisherfaces model and decide to keep all of the possible Fisherfaces:
*
*
* // Create a new Fisherfaces model and retain all available Fisherfaces,
* // this is the most common usage of this specific FaceRecognizer:
* //
* Ptr<FaceRecognizer> model = FisherFaceRecognizer::create();
*
*
* And finally train it on the given dataset (the face images and labels):
*
*
* // This is the common interface to train all of the available cv::FaceRecognizer
* // implementations:
* //
* model->train(images, labels);
*
*/
- (void)train:(NSArray<Mat*>*)src labels:(Mat*)labels NS_SWIFT_NAME(train(src:labels:));
//
// void cv::face::FaceRecognizer::update(vector_Mat src, Mat labels)
//
/**
* Updates a FaceRecognizer with given data and associated labels.
*
* @param src The training images, that means the faces you want to learn. The data has to be given
* as a vector\<Mat\>.
* @param labels The labels corresponding to the images have to be given either as a vector\<int\> or
* a Mat of type CV_32SC1.
*
* This method updates a (probably trained) FaceRecognizer, but only if the algorithm supports it. The
* Local Binary Patterns Histograms (LBPH) recognizer (see createLBPHFaceRecognizer) can be updated.
* For the Eigenfaces and Fisherfaces method, this is algorithmically not possible and you have to
* re-estimate the model with FaceRecognizer::train. In any case, a call to train empties the existing
* model and learns a new model, while update does not delete any model data.
*
*
* // Create a new LBPH model (it can be updated) and use the default parameters,
* // this is the most common usage of this specific FaceRecognizer:
* //
* Ptr<FaceRecognizer> model = LBPHFaceRecognizer::create();
* // This is the common interface to train all of the available cv::FaceRecognizer
* // implementations:
* //
* model->train(images, labels);
* // Some containers to hold new image:
* vector<Mat> newImages;
* vector<int> newLabels;
* // You should add some images to the containers:
* //
* // ...
* //
* // Now updating the model is as easy as calling:
* model->update(newImages,newLabels);
* // This will preserve the old model data and extend the existing model
* // with the new features extracted from newImages!
*
*
* Calling update on an Eigenfaces model (see EigenFaceRecognizer::create), which doesn't support
* updating, will throw an error similar to:
*
*
* OpenCV Error: The function/feature is not implemented (This FaceRecognizer (FaceRecognizer.Eigenfaces) does not support updating, you have to use FaceRecognizer::train to update it.) in update, file /home/philipp/git/opencv/modules/contrib/src/facerec.cpp, line 305
* terminate called after throwing an instance of 'cv::Exception'
*
*
 * NOTE: The FaceRecognizer does not store your training images, because this would be very
 * memory intensive and it's not the responsibility of the FaceRecognizer to do so. The caller is
 * responsible for maintaining the dataset they want to work with.
*/
- (void)update:(NSArray<Mat*>*)src labels:(Mat*)labels NS_SWIFT_NAME(update(src:labels:));
//
// int cv::face::FaceRecognizer::predict(Mat src)
//
- (int)predict_label:(Mat*)src NS_SWIFT_NAME(predict(src:));
//
// void cv::face::FaceRecognizer::predict(Mat src, int& label, double& confidence)
//
/**
* Predicts a label and associated confidence (e.g. distance) for a given input image.
*
* @param src Sample image to get a prediction from.
* @param label The predicted label for the given image.
* @param confidence Associated confidence (e.g. distance) for the predicted label.
*
* The suffix const means that prediction does not affect the internal model state, so the method can
* be safely called from within different threads.
*
* The following example shows how to get a prediction from a trained model:
*
*
* using namespace cv;
* // Do your initialization here (create the cv::FaceRecognizer model) ...
* // ...
* // Read in a sample image:
* Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
* // And get a prediction from the cv::FaceRecognizer:
* int predicted = model->predict(img);
*
*
* Or to get a prediction and the associated confidence (e.g. distance):
*
*
* using namespace cv;
* // Do your initialization here (create the cv::FaceRecognizer model) ...
* // ...
* Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
* // Some variables for the predicted label and associated confidence (e.g. distance):
* int predicted_label = -1;
* double predicted_confidence = 0.0;
* // Get the prediction and associated confidence from the model
* model->predict(img, predicted_label, predicted_confidence);
*
*/
- (void)predict:(Mat*)src label:(int*)label confidence:(double*)confidence NS_SWIFT_NAME(predict(src:label:confidence:));
//
// void cv::face::FaceRecognizer::predict(Mat src, Ptr_PredictCollector collector)
//
/**
 * If implemented, sends all prediction results to the collector, which can be used for custom result handling.
* @param src Sample image to get a prediction from.
* @param collector User-defined collector object that accepts all results
*
 * To implement this method you just have to run the same internal loop as in predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence), but
 * instead of keeping only the "best" result, forward every result to the given collector.
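 *
 * A minimal sketch using the StandardCollector helper from the C++ face module:
 *
 * Ptr<StandardCollector> collector = StandardCollector::create();
 * model->predict(img, collector);
 * // label/distance pairs, sorted by distance when 'true' is passed:
 * std::vector<std::pair<int, double>> results = collector->getResults(true);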
*/
- (void)predict_collect:(Mat*)src collector:(PredictCollector*)collector NS_SWIFT_NAME(predict(src:collector:));
//
// void cv::face::FaceRecognizer::write(String filename)
//
/**
* Saves a FaceRecognizer and its model state.
*
* Saves this model to a given filename, either as XML or YAML.
* @param filename The filename to store this FaceRecognizer to (either XML/YAML).
*
 * Every FaceRecognizer overrides FaceRecognizer::save(FileStorage& fs) to save the internal model
* state. FaceRecognizer::save(const String& filename) saves the state of a model to the given
* filename.
*
* The suffix const means that prediction does not affect the internal model state, so the method can
* be safely called from within different threads.
*/
- (void)write:(NSString*)filename NS_SWIFT_NAME(write(filename:));
//
// void cv::face::FaceRecognizer::read(String filename)
//
/**
* Loads a FaceRecognizer and its model state.
*
 * Loads a persisted model and state from a given XML or YAML file. Every FaceRecognizer has to
 * override FaceRecognizer::load(FileStorage& fs) to enable loading the model state.
 * FaceRecognizer::load(FileStorage& fs) in turn gets called by
 * FaceRecognizer::load(const String& filename), to ease loading a model.
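 *
 * For example, a trained model can be round-tripped like this (a sketch; the file name is arbitrary):
 *
 * model->write("face-model.yml");
 * // ... later, or in another process:
 * Ptr<LBPHFaceRecognizer> model2 = LBPHFaceRecognizer::create();
 * model2->read("face-model.yml");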
*/
- (void)read:(NSString*)filename NS_SWIFT_NAME(read(filename:));
//
// void cv::face::FaceRecognizer::setLabelInfo(int label, String strInfo)
//
/**
* Sets string info for the specified model's label.
*
* The string info is replaced by the provided value if it was set before for the specified label.
*/
- (void)setLabelInfo:(int)label strInfo:(NSString*)strInfo NS_SWIFT_NAME(setLabelInfo(label:strInfo:));
//
// String cv::face::FaceRecognizer::getLabelInfo(int label)
//
/**
* Gets string information by label.
*
* If an unknown label id is provided or there is no label information associated with the specified
* label id the method returns an empty string.
*/
- (NSString*)getLabelInfo:(int)label NS_SWIFT_NAME(getLabelInfo(label:));
//
// vector_int cv::face::FaceRecognizer::getLabelsByString(String str)
//
/**
* Gets vector of labels by string.
*
* The function searches for the labels containing the specified sub-string in the associated string
* info.
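 *
 * For example (a sketch):
 *
 * model->setLabelInfo(0, "alice");
 * model->setLabelInfo(1, "alina");
 * std::vector<int> hits = model->getLabelsByString("ali"); // contains both labels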
*/
- (IntVector*)getLabelsByString:(NSString*)str NS_SWIFT_NAME(getLabelsByString(str:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,129 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/objdetect.hpp"
#import "opencv2/objdetect/face.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
@class Mat;
// C++: enum DisType (cv.FaceRecognizerSF.DisType)
typedef NS_ENUM(int, DisType) {
FaceRecognizerSF_FR_COSINE NS_SWIFT_NAME(FR_COSINE) = 0,
FaceRecognizerSF_FR_NORM_L2 NS_SWIFT_NAME(FR_NORM_L2) = 1
};
NS_ASSUME_NONNULL_BEGIN
// C++: class FaceRecognizerSF
/**
* DNN-based face recognizer
*
* model download link: https://github.com/opencv/opencv_zoo/tree/master/models/face_recognition_sface
*
* Member of `Objdetect`
*/
CV_EXPORTS @interface FaceRecognizerSF : NSObject
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::FaceRecognizerSF> nativePtr;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::FaceRecognizerSF>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::FaceRecognizerSF>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::FaceRecognizerSF::alignCrop(Mat src_img, Mat face_box, Mat& aligned_img)
//
/**
 * Aligns the input image to put the face in a standard position
 * @param src_img input image
 * @param face_box the detection result used to indicate the face in the input image
* @param aligned_img output aligned image
*/
- (void)alignCrop:(Mat*)src_img face_box:(Mat*)face_box aligned_img:(Mat*)aligned_img NS_SWIFT_NAME(alignCrop(src_img:face_box:aligned_img:));
//
// void cv::FaceRecognizerSF::feature(Mat aligned_img, Mat& face_feature)
//
/**
 * Extracts the face feature from the aligned image
* @param aligned_img input aligned image
* @param face_feature output face feature
*/
- (void)feature:(Mat*)aligned_img face_feature:(Mat*)face_feature NS_SWIFT_NAME(feature(aligned_img:face_feature:));
//
// double cv::FaceRecognizerSF::match(Mat face_feature1, Mat face_feature2, int dis_type = FaceRecognizerSF::FR_COSINE)
//
/**
 * Calculates the distance between two face features
 * @param face_feature1 the first input feature
 * @param face_feature2 the second input feature of the same size and the same type as face_feature1
 * @param dis_type defines the similarity measure; optional values are "FR_COSINE" or "FR_NORM_L2"
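 *
 * A typical verification pipeline, as a C++ sketch (recognizer, img1/img2 and box1/box2 are assumed to be an existing FaceRecognizerSF instance, input images and detected face boxes; the cosine threshold 0.363 is the value used in the OpenCV sample and is an assumption here):
 *
 * Mat aligned1, aligned2, feat1, feat2;
 * recognizer->alignCrop(img1, box1, aligned1);
 * recognizer->alignCrop(img2, box2, aligned2);
 * recognizer->feature(aligned1, feat1); feat1 = feat1.clone();  // clone so the feature survives later calls
 * recognizer->feature(aligned2, feat2); feat2 = feat2.clone();
 * double cosScore = recognizer->match(feat1, feat2, FaceRecognizerSF::DisType::FR_COSINE);
 * bool samePerson = cosScore >= 0.363;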
*/
- (double)match:(Mat*)face_feature1 face_feature2:(Mat*)face_feature2 dis_type:(int)dis_type NS_SWIFT_NAME(match(face_feature1:face_feature2:dis_type:));
/**
 * Calculates the distance between two face features
* @param face_feature1 the first input feature
* @param face_feature2 the second input feature of the same size and the same type as face_feature1
*/
- (double)match:(Mat*)face_feature1 face_feature2:(Mat*)face_feature2 NS_SWIFT_NAME(match(face_feature1:face_feature2:));
//
// static Ptr_FaceRecognizerSF cv::FaceRecognizerSF::create(String model, String config, int backend_id = 0, int target_id = 0)
//
/**
* Creates an instance of this class with given parameters
* @param model the path of the onnx model used for face recognition
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param backend_id the id of backend
* @param target_id the id of target device
*/
+ (FaceRecognizerSF*)create:(NSString*)model config:(NSString*)config backend_id:(int)backend_id target_id:(int)target_id NS_SWIFT_NAME(create(model:config:backend_id:target_id:));
/**
* Creates an instance of this class with given parameters
* @param model the path of the onnx model used for face recognition
 * @param config the path to the config file for compatibility; it is not required for ONNX models
* @param backend_id the id of backend
*/
+ (FaceRecognizerSF*)create:(NSString*)model config:(NSString*)config backend_id:(int)backend_id NS_SWIFT_NAME(create(model:config:backend_id:));
/**
* Creates an instance of this class with given parameters
* @param model the path of the onnx model used for face recognition
 * @param config the path to the config file for compatibility; it is not required for ONNX models
*/
+ (FaceRecognizerSF*)create:(NSString*)model config:(NSString*)config NS_SWIFT_NAME(create(model:config:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,102 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/facemark.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class Facemark
/**
* Abstract base class for all facemark models
*
* To utilize this API in your program, please take a look at the REF: tutorial_table_of_content_facemark
* ### Description
*
* Facemark is a base class which provides universal access to any specific facemark algorithm.
* Therefore, the users should declare a desired algorithm before they can use it in their application.
*
* Here is an example on how to declare a facemark algorithm:
*
* // Using Facemark in your code:
* Ptr<Facemark> facemark = createFacemarkLBF();
*
*
* The typical pipeline for facemark detection is as follows:
* - Load the trained model using Facemark::loadModel.
* - Perform the fitting on an image via Facemark::fit.
*
* Member of `Face`
*/
CV_EXPORTS @interface Facemark : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::Facemark> nativePtrFacemark;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::Facemark>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::Facemark>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::face::Facemark::loadModel(String model)
//
/**
* A function to load the trained model before the fitting process.
 * @param model A string representing the filename of a trained model.
*
* <B>Example of usage</B>
*
* facemark->loadModel("../data/lbf.model");
*
*/
- (void)loadModel:(NSString*)model NS_SWIFT_NAME(loadModel(model:));
//
// bool cv::face::Facemark::fit(Mat image, Mat faces, vector_Mat& landmarks)
//
/**
* Detect facial landmarks from an image.
* @param image Input image.
 * @param faces Regions of interest for the detected faces; each face is stored
 * in a cv::Rect container.
 * @param landmarks The detected landmark points for each face.
*
* <B>Example of usage</B>
*
* Mat image = imread("image.jpg");
* std::vector<Rect> faces;
* std::vector<std::vector<Point2f> > landmarks;
* facemark->fit(image, faces, landmarks);
*
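 * In practice the faces have to be detected first, e.g. with a cascade classifier
 * (a sketch; the cascade and model file names are placeholders):
 *
 * CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
 * Ptr<Facemark> facemark = FacemarkLBF::create();
 * facemark->loadModel("lbfmodel.yaml");
 * Mat image = imread("image.jpg");
 * std::vector<Rect> faces;
 * faceDetector.detectMultiScale(image, faces);
 * std::vector<std::vector<Point2f> > landmarks;
 * if (facemark->fit(image, faces, landmarks))
 *     face::drawFacemarks(image, landmarks[0], Scalar(0, 0, 255));
 *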
*/
- (BOOL)fit:(Mat*)image faces:(Mat*)faces landmarks:(NSMutableArray<Mat*>*)landmarks NS_SWIFT_NAME(fit(image:faces:landmarks:));
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,50 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/facemarkAAM.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "FacemarkTrain.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class FacemarkAAM
/**
* The FacemarkAAM module
*
* Member of `Face`
*/
CV_EXPORTS @interface FacemarkAAM : FacemarkTrain
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::FacemarkAAM> nativePtrFacemarkAAM;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::FacemarkAAM>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::FacemarkAAM>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,50 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/face_alignment.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Facemark.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class FacemarkKazemi
/**
* The FacemarkKazemi module
*
* Member of `Face`
*/
CV_EXPORTS @interface FacemarkKazemi : Facemark
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::FacemarkKazemi> nativePtrFacemarkKazemi;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::FacemarkKazemi>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::FacemarkKazemi>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,50 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/facemarkLBF.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "FacemarkTrain.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class FacemarkLBF
/**
* The FacemarkLBF module
*
* Member of `Face`
*/
CV_EXPORTS @interface FacemarkLBF : FacemarkTrain
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::FacemarkLBF> nativePtrFacemarkLBF;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::FacemarkLBF>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::FacemarkLBF>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,75 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/face.hpp"
#import "opencv2/face/facemark_train.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Facemark.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class FacemarkTrain
/**
* Abstract base class for trainable facemark models
*
* To utilize this API in your program, please take a look at the REF: tutorial_table_of_content_facemark
* ### Description
*
* The AAM and LBF facemark models in OpenCV are derived from the abstract base class FacemarkTrain, which
* provides a unified access to those facemark algorithms in OpenCV.
*
* Here is an example on how to declare facemark algorithm:
*
* // Using Facemark in your code:
* Ptr<Facemark> facemark = FacemarkLBF::create();
*
*
*
* The typical pipeline for facemark detection is listed as follows:
 * - (Non-mandatory) Set a user-defined face detector using FacemarkTrain::setFaceDetector.
 * The facemark algorithms are designed to fit the facial points into a face.
 * Therefore, the face information should be provided to the facemark algorithm.
 * Some algorithms might provide a default face detection function.
 * However, the users might prefer to use their own face detector to obtain the best possible detection result, as sketched below.
* - (Non-mandatory) Training the model for a specific algorithm using FacemarkTrain::training.
* In this case, the model should be automatically saved by the algorithm.
* If the user already have a trained model, then this part can be omitted.
* - Load the trained model using Facemark::loadModel.
* - Perform the fitting via the Facemark::fit.
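 *
 * As a sketch, a custom detector can be plugged in like this (the myDetector body is a placeholder):
 *
 * bool myDetector(InputArray image, OutputArray faces, void* userData)
 * {
 *     std::vector<Rect> rects;
 *     // ... fill 'rects' using any detector of your choice ...
 *     Mat(rects).copyTo(faces);
 *     return true;
 * }
 * facemark->setFaceDetector((FN_FaceDetector)myDetector, NULL);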
*
* Member of `Face`
*/
CV_EXPORTS @interface FacemarkTrain : Facemark
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::face::FacemarkTrain> nativePtrFacemarkTrain;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::face::FacemarkTrain>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::face::FacemarkTrain>)nativePtr;
#endif
#pragma mark - Methods
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,168 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/video.hpp"
#import "opencv2/video/tracking.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "DenseOpticalFlow.h"
NS_ASSUME_NONNULL_BEGIN
// C++: class FarnebackOpticalFlow
/**
* Class computing a dense optical flow using the Gunnar Farneback's algorithm.
*
* Member of `Video`
*/
CV_EXPORTS @interface FarnebackOpticalFlow : DenseOpticalFlow
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::FarnebackOpticalFlow> nativePtrFarnebackOpticalFlow;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::FarnebackOpticalFlow>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::FarnebackOpticalFlow>)nativePtr;
#endif
#pragma mark - Methods
//
// int cv::FarnebackOpticalFlow::getNumLevels()
//
- (int)getNumLevels NS_SWIFT_NAME(getNumLevels());
//
// void cv::FarnebackOpticalFlow::setNumLevels(int numLevels)
//
- (void)setNumLevels:(int)numLevels NS_SWIFT_NAME(setNumLevels(numLevels:));
//
// double cv::FarnebackOpticalFlow::getPyrScale()
//
- (double)getPyrScale NS_SWIFT_NAME(getPyrScale());
//
// void cv::FarnebackOpticalFlow::setPyrScale(double pyrScale)
//
- (void)setPyrScale:(double)pyrScale NS_SWIFT_NAME(setPyrScale(pyrScale:));
//
// bool cv::FarnebackOpticalFlow::getFastPyramids()
//
- (BOOL)getFastPyramids NS_SWIFT_NAME(getFastPyramids());
//
// void cv::FarnebackOpticalFlow::setFastPyramids(bool fastPyramids)
//
- (void)setFastPyramids:(BOOL)fastPyramids NS_SWIFT_NAME(setFastPyramids(fastPyramids:));
//
// int cv::FarnebackOpticalFlow::getWinSize()
//
- (int)getWinSize NS_SWIFT_NAME(getWinSize());
//
// void cv::FarnebackOpticalFlow::setWinSize(int winSize)
//
- (void)setWinSize:(int)winSize NS_SWIFT_NAME(setWinSize(winSize:));
//
// int cv::FarnebackOpticalFlow::getNumIters()
//
- (int)getNumIters NS_SWIFT_NAME(getNumIters());
//
// void cv::FarnebackOpticalFlow::setNumIters(int numIters)
//
- (void)setNumIters:(int)numIters NS_SWIFT_NAME(setNumIters(numIters:));
//
// int cv::FarnebackOpticalFlow::getPolyN()
//
- (int)getPolyN NS_SWIFT_NAME(getPolyN());
//
// void cv::FarnebackOpticalFlow::setPolyN(int polyN)
//
- (void)setPolyN:(int)polyN NS_SWIFT_NAME(setPolyN(polyN:));
//
// double cv::FarnebackOpticalFlow::getPolySigma()
//
- (double)getPolySigma NS_SWIFT_NAME(getPolySigma());
//
// void cv::FarnebackOpticalFlow::setPolySigma(double polySigma)
//
- (void)setPolySigma:(double)polySigma NS_SWIFT_NAME(setPolySigma(polySigma:));
//
// int cv::FarnebackOpticalFlow::getFlags()
//
- (int)getFlags NS_SWIFT_NAME(getFlags());
//
// void cv::FarnebackOpticalFlow::setFlags(int flags)
//
- (void)setFlags:(int)flags NS_SWIFT_NAME(setFlags(flags:));
//
// static Ptr_FarnebackOpticalFlow cv::FarnebackOpticalFlow::create(int numLevels = 5, double pyrScale = 0.5, bool fastPyramids = false, int winSize = 13, int numIters = 10, int polyN = 5, double polySigma = 1.1, int flags = 0)
//
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale fastPyramids:(BOOL)fastPyramids winSize:(int)winSize numIters:(int)numIters polyN:(int)polyN polySigma:(double)polySigma flags:(int)flags NS_SWIFT_NAME(create(numLevels:pyrScale:fastPyramids:winSize:numIters:polyN:polySigma:flags:));
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale fastPyramids:(BOOL)fastPyramids winSize:(int)winSize numIters:(int)numIters polyN:(int)polyN polySigma:(double)polySigma NS_SWIFT_NAME(create(numLevels:pyrScale:fastPyramids:winSize:numIters:polyN:polySigma:));
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale fastPyramids:(BOOL)fastPyramids winSize:(int)winSize numIters:(int)numIters polyN:(int)polyN NS_SWIFT_NAME(create(numLevels:pyrScale:fastPyramids:winSize:numIters:polyN:));
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale fastPyramids:(BOOL)fastPyramids winSize:(int)winSize numIters:(int)numIters NS_SWIFT_NAME(create(numLevels:pyrScale:fastPyramids:winSize:numIters:));
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale fastPyramids:(BOOL)fastPyramids winSize:(int)winSize NS_SWIFT_NAME(create(numLevels:pyrScale:fastPyramids:winSize:));
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale fastPyramids:(BOOL)fastPyramids NS_SWIFT_NAME(create(numLevels:pyrScale:fastPyramids:));
+ (FarnebackOpticalFlow*)create:(int)numLevels pyrScale:(double)pyrScale NS_SWIFT_NAME(create(numLevels:pyrScale:));
+ (FarnebackOpticalFlow*)create:(int)numLevels NS_SWIFT_NAME(create(numLevels:));
+ (FarnebackOpticalFlow*)create NS_SWIFT_NAME(create());
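// A minimal C++ usage sketch (frame1/frame2 are assumed to be consecutive 8-bit grayscale frames):
//
//     Ptr<FarnebackOpticalFlow> fof = FarnebackOpticalFlow::create();
//     Mat flow;                      // output: CV_32FC2 per-pixel displacement
//     fof->calc(frame1, frame2, flow);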
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,69 @@
//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/ximgproc.hpp"
#import "opencv2/ximgproc/edge_filter.hpp"
#else
#define CV_EXPORTS
#endif
#import <Foundation/Foundation.h>
#import "Algorithm.h"
@class Mat;
NS_ASSUME_NONNULL_BEGIN
// C++: class FastBilateralSolverFilter
/**
* Interface for implementations of Fast Bilateral Solver.
*
* For more details about this solver see CITE: BarronPoole2016 .
*
* Member of `Ximgproc`
*/
CV_EXPORTS @interface FastBilateralSolverFilter : Algorithm
#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::ximgproc::FastBilateralSolverFilter> nativePtrFastBilateralSolverFilter;
#endif
#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::ximgproc::FastBilateralSolverFilter>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::ximgproc::FastBilateralSolverFilter>)nativePtr;
#endif
#pragma mark - Methods
//
// void cv::ximgproc::FastBilateralSolverFilter::filter(Mat src, Mat confidence, Mat& dst)
//
/**
* Apply smoothing operation to the source image.
*
* @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 3 channels.
*
* @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
*
* @param dst destination image.
*
 * NOTE: Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.
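 *
 * A C++ sketch of typical usage (guide, src and confidence are assumed to be prepared Mat images; the sigma values are illustrative, not tuned):
 *
 * Ptr<FastBilateralSolverFilter> fbs = ximgproc::createFastBilateralSolverFilter(guide, 8.0, 8.0, 8.0);
 * Mat dst;
 * fbs->filter(src, confidence, dst);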
*/
- (void)filter:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst NS_SWIFT_NAME(filter(src:confidence:dst:));
@end
NS_ASSUME_NONNULL_END

Some files were not shown because too many files have changed in this diff.