// opencv2/cudalegacy.hpp — OpenCV CUDA legacy module public header
      1 /*M///////////////////////////////////////////////////////////////////////////////////////
      2 //
      3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
      4 //
      5 //  By downloading, copying, installing or using the software you agree to this license.
      6 //  If you do not agree to this license, do not download, install,
      7 //  copy or use the software.
      8 //
      9 //
     10 //                           License Agreement
     11 //                For Open Source Computer Vision Library
     12 //
     13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
     14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
     15 // Third party copyrights are property of their respective owners.
     16 //
     17 // Redistribution and use in source and binary forms, with or without modification,
     18 // are permitted provided that the following conditions are met:
     19 //
     20 //   * Redistribution's of source code must retain the above copyright notice,
     21 //     this list of conditions and the following disclaimer.
     22 //
     23 //   * Redistribution's in binary form must reproduce the above copyright notice,
     24 //     this list of conditions and the following disclaimer in the documentation
     25 //     and/or other materials provided with the distribution.
     26 //
     27 //   * The name of the copyright holders may not be used to endorse or promote products
     28 //     derived from this software without specific prior written permission.
     29 //
     30 // This software is provided by the copyright holders and contributors "as is" and
     31 // any express or implied warranties, including, but not limited to, the implied
     32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
     33 // In no event shall the Intel Corporation or contributors be liable for any direct,
     34 // indirect, incidental, special, exemplary, or consequential damages
     35 // (including, but not limited to, procurement of substitute goods or services;
     36 // loss of use, data, or profits; or business interruption) however caused
     37 // and on any theory of liability, whether in contract, strict liability,
     38 // or tort (including negligence or otherwise) arising in any way out of
     39 // the use of this software, even if advised of the possibility of such damage.
     40 //
     41 //M*/
     42 
     43 #ifndef __OPENCV_CUDALEGACY_HPP__
     44 #define __OPENCV_CUDALEGACY_HPP__
     45 
     46 #include "opencv2/core/cuda.hpp"
     47 #include "opencv2/cudalegacy/NCV.hpp"
     48 #include "opencv2/cudalegacy/NPP_staging.hpp"
     49 #include "opencv2/cudalegacy/NCVPyramid.hpp"
     50 #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
     51 #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
     52 #include "opencv2/video/background_segm.hpp"
     53 
     54 /**
     55   @addtogroup cuda
     56   @{
     57     @defgroup cudalegacy Legacy support
     58   @}
     59 */
     60 
     61 namespace cv { namespace cuda {
     62 
     63 //! @addtogroup cudalegacy
     64 //! @{
     65 
     66 //
     67 // ImagePyramid
     68 //
     69 
//! Multi-scale (pyramid) representation of an image, pre-built on the GPU.
//! Instances are created with createImagePyramid() below.
class CV_EXPORTS ImagePyramid : public Algorithm
{
public:
    //! Writes into outImg a layer of size outRoi, optionally on an asynchronous CUDA stream.
    //! NOTE(review): presumably the stored layer closest to outRoi is selected and resampled —
    //! confirm against the implementation.
    virtual void getLayer(OutputArray outImg, Size outRoi, Stream& stream = Stream::Null()) const = 0;
};
     75 
//! Builds an ImagePyramid from img. nLayers = -1 presumably means "build as many layers as
//! possible" — confirm against the implementation. stream enables asynchronous construction.
CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null());
     77 
     78 //
     79 // GMG
     80 //
     81 
/** @brief Background/Foreground Segmentation Algorithm.

The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements algorithm described in @cite Gold2012 .
 */
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
    using cv::BackgroundSubtractor::apply;
    //! Asynchronous variant of apply(): updates the model and computes fgmask on the given CUDA stream.
    virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;

    //! Total number of distinct colors to maintain in the histogram of each pixel.
    virtual int getMaxFeatures() const = 0;
    virtual void setMaxFeatures(int maxFeatures) = 0;

    //! Learning rate in (0, 1]: determines how quickly features are "forgotten" from the histograms.
    virtual double getDefaultLearningRate() const = 0;
    virtual void setDefaultLearningRate(double lr) = 0;

    //! Number of frames of video used to initialize the background model.
    virtual int getNumFrames() const = 0;
    virtual void setNumFrames(int nframes) = 0;

    //! Number of discrete levels per image channel used in the histograms.
    virtual int getQuantizationLevels() const = 0;
    virtual void setQuantizationLevels(int nlevels) = 0;

    //! Prior probability that any given pixel is a background pixel.
    virtual double getBackgroundPrior() const = 0;
    virtual void setBackgroundPrior(double bgprior) = 0;

    //! Radius (in pixels) of the morphological smoothing applied to the foreground mask.
    virtual int getSmoothingRadius() const = 0;
    virtual void setSmoothingRadius(int radius) = 0;

    //! Posterior-probability value above which a pixel is classified as foreground.
    virtual double getDecisionThreshold() const = 0;
    virtual void setDecisionThreshold(double thresh) = 0;

    //! Whether the background model keeps being updated as new frames arrive.
    virtual bool getUpdateBackgroundModel() const = 0;
    virtual void setUpdateBackgroundModel(bool update) = 0;

    //! Minimum value the input images can take (used for quantization).
    virtual double getMinVal() const = 0;
    virtual void setMinVal(double val) = 0;

    //! Maximum value the input images can take (used for quantization).
    virtual double getMaxVal() const = 0;
    virtual void setMaxVal(double val) = 0;
};
    124 
/** @brief Creates GMG Background Subtractor

@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which pixel is determined to be FG.
@return New GMG background subtractor instance.
 */
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
    createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
    132 
    133 //
    134 // FGD
    135 //
    136 
/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
of the background.

Any pixel which does not fit this model is then deemed to be foreground. The class implements
algorithm described in @cite FGD2003 .
@sa BackgroundSubtractor
 */
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
    /** @brief Returns the output foreground regions calculated by findContours.

    @param foreground_regions Output array (CPU memory).
    @note NOTE(review): presumably only meaningful after apply() has processed at least one
    frame — confirm against the implementation.
     */
    virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
    153 
/** @brief Tuning parameters of the FGD background-subtraction algorithm (@cite FGD2003). */
struct CV_EXPORTS FGDParams
{
    int Lc;  //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
    int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
    int N2c; //!< Number of color vectors retained at given pixel.  Must be > N1c, typically ~ 5/3 of N1c.
    //!< Used to allow the first N1c vectors to adapt over time to changing background.

    int Lcc;  //!< Quantized levels per 'color co-occurrence' component.  Power of two, typically 16, 32 or 64.
    int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
    int N2cc; //!< Number of color co-occurrence vectors retained at given pixel.  Must be > N1cc, typically ~ 5/3 of N1cc.
    //!< Used to allow the first N1cc vectors to adapt over time to changing background.

    bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
    int perform_morphing;     //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
    //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.

    float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
    float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
    float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.

    float delta;   //!< Affects color and color co-occurrence quantization, typically set to 2.
    float T;       //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
    float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.

    //! Default constructor: initializes every field to its recommended default value.
    FGDParams();
};
    181 
/** @brief Creates FGD Background Subtractor

@param params Algorithm's parameters. See @cite FGD2003 for explanation.
@return New FGD background subtractor instance.
 */
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
    createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
    188 
    189 //
    190 // Optical flow
    191 //
    192 
//! Calculates optical flow for 2 images using a block matching algorithm.
//! velx/vely receive the horizontal/vertical flow components; buf is a reusable scratch buffer.
//! NOTE(review): presumed parameter semantics — block_size is the compared block size,
//! shift_size the step between candidate positions, max_range the search window, and
//! use_previous seeds the search from the existing (velx, vely); confirm against the implementation.
CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
                                  Size block_size, Size shift_size, Size max_range, bool use_previous,
                                  GpuMat& velx, GpuMat& vely, GpuMat& buf,
                                  Stream& stream = Stream::Null());
    198 
//! Fast block-matching optical flow between two images I0 and I1.
//! Internal GpuMat members are kept as scratch storage so repeated calls reuse GPU memory.
class CV_EXPORTS FastOpticalFlowBM
{
public:
    //! Computes per-pixel flow (flowx, flowy) using a search_window x search_window search area
    //! and block_window x block_window blocks, optionally on an asynchronous CUDA stream.
    void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());

private:
    GpuMat buffer;       //!< reusable scratch buffer
    //! NOTE(review): presumably border-extended copies of the inputs — confirm in implementation.
    GpuMat extended_I0;
    GpuMat extended_I1;
};
    209 
/** @brief Interpolates frames (images) using provided optical flow (displacement field).

@param frame0 First frame (32-bit floating point images, single channel).
@param frame1 Second frame. Must have the same type and size as frame0 .
@param fu Forward horizontal displacement.
@param fv Forward vertical displacement.
@param bu Backward horizontal displacement.
@param bv Backward vertical displacement.
@param pos New frame position.
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.
@param stream Stream for the asynchronous version.
@note NOTE(review): pos is presumably in [0, 1], with 0 = frame0 and 1 = frame1 — confirm
against the implementation.
 */
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
                                  const GpuMat& fu, const GpuMat& fv,
                                  const GpuMat& bu, const GpuMat& bv,
                                  float pos, GpuMat& newFrame, GpuMat& buf,
                                  Stream& stream = Stream::Null());
    231 
//! Builds vertex and color buffers for rendering a "needle map" visualization of the flow
//! field (u, v). NOTE(review): exact buffer layout is implementation-defined — confirm there.
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
    233 
    234 //
    235 // Labeling
    236 //
    237 
//! Performs labeling via graph cuts of a 2D regular 4-connected graph.
//! labels receives the resulting segmentation; buf is a reusable scratch buffer.
//! NOTE(review): edge-weight matrix layout presumably follows the NPP graphcut convention
//! (terminals = t-weights, *Transp = transposed neighbor weights) — confirm in implementation.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
                         GpuMat& buf, Stream& stream = Stream::Null());
    241 
//! Performs labeling via graph cuts of a 2D regular 8-connected graph (adds the four diagonal
//! neighbor-weight matrices to the 4-connected overload above).
//! labels receives the resulting segmentation; buf is a reusable scratch buffer.
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
                         GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight,
                         GpuMat& labels,
                         GpuMat& buf, Stream& stream = Stream::Null());
    247 
//! Computes the mask for generalized flood-fill connected components labeling;
//! lo/hi bound the per-channel difference under which neighboring pixels are considered connected.
CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
    250 
//! Performs connected components labeling on a connectivity mask (see connectivityMask above);
//! components receives the per-pixel component labels.
CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
    253 
    254 //
    255 // Calib3d
    256 //
    257 
//! Applies the rigid transform (rvec, tvec) to each 3D point in src, writing results to dst.
//! rvec is a 3D rotation vector (Rodrigues form) and tvec a 3D translation vector, as in cv::transform-style calib3d APIs.
CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
                                GpuMat& dst, Stream& stream = Stream::Null());
    260 
//! Projects 3D points in src onto the image plane: applies (rvec, tvec), then the intrinsic
//! matrix camera_mat and distortion coefficients dist_coef (cf. cv::projectPoints).
CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
                              const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
                              Stream& stream = Stream::Null());
    264 
/** @brief Finds the object pose from 3D-2D point correspondences.

@param object Single-row matrix of object points.
@param image Single-row matrix of image points.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param rvec Output 3D rotation vector.
@param tvec Output 3D translation vector.
@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
initial transformation guess. It is not supported for now.
@param num_iters Maximum number of RANSAC iterations.
@param max_dist Euclidean distance threshold to detect whether point is inlier or not.
@param min_inlier_count Flag to indicate that the function must stop if greater or equal number
of inliers is achieved. It is not supported for now.
@param inliers Output vector of inlier indices.
@sa cv::solvePnPRansac
 */
CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
                               const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
                               int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
                               std::vector<int>* inliers=NULL);
    285 
    286 //! @}
    287 
    288 }}
    289 
    290 #endif /* __OPENCV_CUDALEGACY_HPP__ */
    291