/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
//#include <math.h>

#include "precomp.hpp"

namespace cv
{

/*!
 The class implements the following algorithm:
 "Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
 Z.Zivkovic, F. van der Heijden
 Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006
 http://www.zoranz.net/Publications/zivkovicPRL2006.pdf
*/
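// Implementation outline (as can be read from the code below): for every pixel the model
// keeps nN samples for each of three update speeds (short-, mid- and long-term circular
// lists), i.e. nN*3 samples in total; each sample stores the pixel values plus a one-byte
// flag marking whether the sample is considered background. A pixel is classified as
// background when at least nkNN of its stored samples lie within the squared-distance
// threshold fTb of the current value.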

// default parameters of the KNN background subtraction algorithm
static const int defaultHistory2 = 500; // learning rate; alpha = 1/defaultHistory2
static const int defaultNsamples = 7; // number of samples saved in memory
static const float defaultDist2Threshold = 20.0f*20.0f; // threshold on the squared distance from the sample

// additional parameters
static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value used in the segmentation mask for shadows; set to 0 to disable shadow detection
static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
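// For example, with the default history of 500 frames the update rate is
// alpha = 1/defaultHistory2 = 1/500 = 0.002 per frame.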

class BackgroundSubtractorKNNImpl : public BackgroundSubtractorKNN
{
public:
    //! the default constructor
    BackgroundSubtractorKNNImpl()
    {
        frameSize = Size(0,0);
        frameType = 0;
        nframes = 0;
        history = defaultHistory2;

        // set parameters
        // N - the number of samples stored in memory per model
        nN = defaultNsamples;

        // kNN - k nearest neighbours - number of NN required for detecting background - default K = [0.1*nN]
        nkNN = MAX(1, cvRound(0.1*nN*3 + 0.40));

        // Tb - threshold Tb*kernelwidth
        fTb = defaultDist2Threshold;

        // shadow detection
        bShadowDetection = true; // turn on
        nShadowDetection = defaultnShadowDetection2;
        fTau = defaultfTau; // Tau - shadow threshold
        name_ = "BackgroundSubtractor.KNN";
    }
    //! the full constructor that takes the length of the history,
    // the squared distance threshold and the shadow detection flag
    BackgroundSubtractorKNNImpl(int _history, float _dist2Threshold, bool _bShadowDetection=true)
    {
        frameSize = Size(0,0);
        frameType = 0;

        nframes = 0;
        history = _history > 0 ? _history : defaultHistory2;

        // set parameters
        // N - the number of samples stored in memory per model
        nN = defaultNsamples;
        // kNN - k nearest neighbours - number of NN required for detecting background - default K = [0.1*nN]
        nkNN = MAX(1, cvRound(0.1*nN*3 + 0.40));

        // Tb - threshold Tb*kernelwidth
        fTb = _dist2Threshold > 0 ? _dist2Threshold : defaultDist2Threshold;

        bShadowDetection = _bShadowDetection;
        nShadowDetection = defaultnShadowDetection2;
        fTau = defaultfTau;
        name_ = "BackgroundSubtractor.KNN";
    }
    //! the destructor
    ~BackgroundSubtractorKNNImpl() {}
    //! the update operator
    void apply(InputArray image, OutputArray fgmask, double learningRate=-1);

    //! computes a background image built from the stored background samples
    virtual void getBackgroundImage(OutputArray backgroundImage) const;

    //! re-initialization method
    void initialize(Size _frameSize, int _frameType)
    {
        frameSize = _frameSize;
        frameType = _frameType;
        nframes = 0;

        int nchannels = CV_MAT_CN(frameType);
        CV_Assert( nchannels <= CV_CN_MAX );

        // reserve memory for the model
        int size = frameSize.height*frameSize.width;
        // for each pixel we store nN samples for each of the 3 speed models;
        // each sample holds the pixel values plus an inclusion flag (nchannels+1 bytes)
        bgmodel.create( 1, (nN * 3) * (nchannels+1) * size, CV_8U );

        // indices through the three circular lists
        aModelIndexShort.create(1,size,CV_8U);
        aModelIndexMid.create(1,size,CV_8U);
        aModelIndexLong.create(1,size,CV_8U);
        // when to update next
        nNextShortUpdate.create(1,size,CV_8U);
        nNextMidUpdate.create(1,size,CV_8U);
        nNextLongUpdate.create(1,size,CV_8U);

        // reset counters
        nShortCounter = 0;
        nMidCounter = 0;
        nLongCounter = 0;

        aModelIndexShort = Scalar::all(0);//random? //((m_nN)*rand())/(RAND_MAX+1);//0...m_nN-1
        aModelIndexMid = Scalar::all(0);
        aModelIndexLong = Scalar::all(0);
        nNextShortUpdate = Scalar::all(0);
        nNextMidUpdate = Scalar::all(0);
        nNextLongUpdate = Scalar::all(0);
    }
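    // Worked example of the model size (illustration only, not used by the code):
    // for a CV_8UC3 frame with the default nN = 7, each pixel needs
    // (nN*3)*(nchannels+1) = 21*4 = 84 bytes of bgmodel data, so a 640x480 frame
    // allocates about 640*480*84 ~ 25.8 MB, plus six 1-byte-per-pixel index/update maps.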

    virtual int getHistory() const { return history; }
    virtual void setHistory(int _nframes) { history = _nframes; }

    virtual int getNSamples() const { return nN; }
    virtual void setNSamples(int _nN) { nN = _nN; } // needs reinitialization!

    virtual int getkNNSamples() const { return nkNN; }
    virtual void setkNNSamples(int _nkNN) { nkNN = _nkNN; }

    virtual double getDist2Threshold() const { return fTb; }
    virtual void setDist2Threshold(double _dist2Threshold) { fTb = (float)_dist2Threshold; }

    virtual bool getDetectShadows() const { return bShadowDetection; }
    virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; }

    virtual int getShadowValue() const { return nShadowDetection; }
    virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; }

    virtual double getShadowThreshold() const { return fTau; }
    virtual void setShadowThreshold(double value) { fTau = (float)value; }

    virtual void write(FileStorage& fs) const
    {
        fs << "name" << name_
        << "history" << history
        << "nsamples" << nN
        << "nKNN" << nkNN
        << "dist2Threshold" << fTb
        << "detectShadows" << (int)bShadowDetection
        << "shadowValue" << (int)nShadowDetection
        << "shadowThreshold" << fTau;
    }

    virtual void read(const FileNode& fn)
    {
        CV_Assert( (String)fn["name"] == name_ );
        history = (int)fn["history"];
        nN = (int)fn["nsamples"];
        nkNN = (int)fn["nKNN"];
        fTb = (float)fn["dist2Threshold"];
        bShadowDetection = (int)fn["detectShadows"] != 0;
        nShadowDetection = saturate_cast<uchar>((int)fn["shadowValue"]);
        fTau = (float)fn["shadowThreshold"];
    }

protected:
    Size frameSize;
    int frameType;
    int nframes;
    /////////////////////////
    // very important parameters - things you will change
    ////////////////////////
    int history;
    // alpha=1/history - speed of update - if the time interval you want to average over is T,
    // set alpha=1/history. It is also useful at the start to make T slowly increase
    // from 1 until the desired T.
    float fTb;
    // Tb - threshold on the squared distance from the sample, used to decide if a pixel is well described
    // by the background model or not. A typical value could be 2 sigma,
    // i.e. Tb = (2*10)^2 = 400, where the typical pixel-level sigma is 10.

    /////////////////////////
    // less important parameters - things you might change, but be careful
    ////////////////////////
    int nN; // total number of samples
    int nkNN; // number of NN for detecting background - default K = [0.1*nN]

    // shadow detection parameters
    bool bShadowDetection; // default true - do shadow detection
    unsigned char nShadowDetection; // value inserted into the mask as the shadow detection result - 127 by default
    float fTau;
    // Tau - shadow threshold. A shadow is detected if the pixel is a darker
    // version of the background. Tau is a threshold on how much darker the shadow can be.
    // Tau = 0.5 means that if a pixel is more than 2 times darker, it is not a shadow.
    // See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
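    // Shadow test used in _cvCheckPixelBackgroundNP, summarized here for reference:
    // for a background sample B and the current pixel I it computes a = <I,B>/<B,B>;
    // the sample votes for shadow when fTau <= a <= 1 and ||a*B - I||^2 < fTb*a^2,
    // and the pixel is labelled shadow once nkNN samples vote.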

    // model data
    int nLongCounter; // circular counter
    int nMidCounter;
    int nShortCounter;
    Mat bgmodel; // model data pixel values
    Mat aModelIndexShort; // index into the models
    Mat aModelIndexMid;
    Mat aModelIndexLong;
    Mat nNextShortUpdate; // random update points per model
    Mat nNextMidUpdate;
    Mat nNextLongUpdate;

    String name_;
};

//{ to do - parallelization ...
//struct KNNInvoker....
CV_INLINE void
        _cvUpdatePixelBackgroundNP(long pixel, const uchar* data, int nchannels, int m_nN,
        uchar* m_aModel,
        uchar* m_nNextLongUpdate,
        uchar* m_nNextMidUpdate,
        uchar* m_nNextShortUpdate,
        uchar* m_aModelIndexLong,
        uchar* m_aModelIndexMid,
        uchar* m_aModelIndexShort,
        int m_nLongCounter,
        int m_nMidCounter,
        int m_nShortCounter,
        int m_nLongUpdate,
        int m_nMidUpdate,
        int m_nShortUpdate,
        uchar include
        )
{
    // hold the offsets
    int ndata = 1 + nchannels;
    long offsetLong =  ndata * (pixel * m_nN * 3 + m_aModelIndexLong[pixel] + m_nN * 2);
    long offsetMid =   ndata * (pixel * m_nN * 3 + m_aModelIndexMid[pixel]  + m_nN * 1);
    long offsetShort = ndata * (pixel * m_nN * 3 + m_aModelIndexShort[pixel]);
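    // Layout note: the nN*3 samples of a pixel are stored contiguously as
    // [short list | mid list | long list], each list holding nN entries of
    // ndata = nchannels+1 bytes; the three offsets above index into these lists.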

    // Long update?
    if (m_nNextLongUpdate[pixel] == m_nLongCounter)
    {
        // add the oldest pixel from Mid to the list of values (for each color)
        memcpy(&m_aModel[offsetLong],&m_aModel[offsetMid],ndata*sizeof(unsigned char));
        // increase the index
        m_aModelIndexLong[pixel] = (m_aModelIndexLong[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexLong[pixel] + 1);
    }
    if (m_nLongCounter == (m_nLongUpdate-1))
    {
        //m_nNextLongUpdate[pixel] = (uchar)(((m_nLongUpdate)*(rand()-1))/RAND_MAX);//0,...m_nLongUpdate-1;
        m_nNextLongUpdate[pixel] = (uchar)( rand() % m_nLongUpdate );//0,...m_nLongUpdate-1;
    }

    // Mid update?
    if (m_nNextMidUpdate[pixel] == m_nMidCounter)
    {
        // add this pixel to the list of values (for each color)
        memcpy(&m_aModel[offsetMid],&m_aModel[offsetShort],ndata*sizeof(unsigned char));
        // increase the index
        m_aModelIndexMid[pixel] = (m_aModelIndexMid[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexMid[pixel] + 1);
    }
    if (m_nMidCounter == (m_nMidUpdate-1))
    {
        m_nNextMidUpdate[pixel] = (uchar)( rand() % m_nMidUpdate );
    }

    // Short update?
    if (m_nNextShortUpdate[pixel] == m_nShortCounter)
    {
        // add this pixel to the list of values (for each color)
        memcpy(&m_aModel[offsetShort],data,ndata*sizeof(unsigned char));
        // set the include flag
        m_aModel[offsetShort+nchannels]=include;
        // increase the index
        m_aModelIndexShort[pixel] = (m_aModelIndexShort[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexShort[pixel] + 1);
    }
    if (m_nShortCounter == (m_nShortUpdate-1))
    {
        m_nNextShortUpdate[pixel] = (uchar)( rand() % m_nShortUpdate );
    }
}

CV_INLINE int
        _cvCheckPixelBackgroundNP(long pixel,
        const uchar* data, int nchannels,
        int m_nN,
        uchar* m_aModel,
        float m_fTb,
        int m_nkNN,
        float tau,
        int m_nShadowDetection,
        uchar& include)
{
    int Pbf = 0; // the total probability that this pixel is background
    int Pb = 0; // background model probability
    float dData[CV_CN_MAX];

    //uchar& include=data[nchannels];
    include = 0; // do we include this pixel into the background model?

    int ndata = nchannels+1;
    long posPixel = pixel * ndata * m_nN * 3;
    //float k;
    // now increase the probability for each pixel
    for (int n = 0; n < m_nN*3; n++)
    {
        uchar* mean_m = &m_aModel[posPixel + n*ndata];

        // calculate difference and distance
        float dist2;

        if( nchannels == 3 )
        {
            dData[0] = (float)mean_m[0] - data[0];
            dData[1] = (float)mean_m[1] - data[1];
            dData[2] = (float)mean_m[2] - data[2];
            dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2];
        }
        else
        {
            dist2 = 0.f;
            for( int c = 0; c < nchannels; c++ )
            {
                dData[c] = (float)mean_m[c] - data[c];
                dist2 += dData[c]*dData[c];
            }
        }

        if (dist2 < m_fTb)
        {
            Pbf++; // all
            // background only
            //if(m_aModel[subPosPixel + nchannels])//indicator
            if(mean_m[nchannels]) // indicator
            {
                Pb++;
                if (Pb >= m_nkNN) // Tb
                {
                    include = 1; // include
                    return 1; // background -> exit
                }
            }
        }
    }

    // include?
    if (Pbf >= m_nkNN) // m_nTbf
    {
        include = 1;
    }

    int Ps = 0; // the total probability that this pixel is background shadow
    // Detected as moving object, perform shadow detection
    if (m_nShadowDetection)
    {
        for (int n = 0; n < m_nN*3; n++)
        {
            //long subPosPixel = posPixel + n*ndata;
            uchar* mean_m = &m_aModel[posPixel + n*ndata];

            if(mean_m[nchannels]) // check only background
            {
                float numerator = 0.0f;
                float denominator = 0.0f;
                for( int c = 0; c < nchannels; c++ )
                {
                    numerator   += (float)data[c] * mean_m[c];
                    denominator += (float)mean_m[c] * mean_m[c];
                }

                // no division by zero allowed
                if( denominator == 0 )
                    return 0;

                // if tau < a < 1 then also check the color distortion
                if( numerator <= denominator && numerator >= tau*denominator )
                {
                    float a = numerator / denominator;
                    float dist2a = 0.0f;

                    for( int c = 0; c < nchannels; c++ )
                    {
                        float dD = a*mean_m[c] - data[c];
                        dist2a += dD*dD;
                    }

                    if (dist2a < m_fTb*a*a)
                    {
                        Ps++;
                        if (Ps >= m_nkNN) // shadow
                            return 2;
                    }
                }
            }
        }
    }
    return 0;
}

CV_INLINE void
        icvUpdatePixelBackgroundNP(const Mat& _src, Mat& _dst,
        Mat& _bgmodel,
        Mat& _nNextLongUpdate,
        Mat& _nNextMidUpdate,
        Mat& _nNextShortUpdate,
        Mat& _aModelIndexLong,
        Mat& _aModelIndexMid,
        Mat& _aModelIndexShort,
        int& _nLongCounter,
        int& _nMidCounter,
        int& _nShortCounter,
        int _nN,
        float _fAlphaT,
        float _fTb,
        int _nkNN,
        float _fTau,
        int _bShadowDetection,
        uchar nShadowDetection
        )
{
    int nchannels = CV_MAT_CN(_src.type());

    //model
    uchar* m_aModel=_bgmodel.ptr(0);
    uchar* m_nNextLongUpdate=_nNextLongUpdate.ptr(0);
    uchar* m_nNextMidUpdate=_nNextMidUpdate.ptr(0);
    uchar* m_nNextShortUpdate=_nNextShortUpdate.ptr(0);
    uchar* m_aModelIndexLong=_aModelIndexLong.ptr(0);
    uchar* m_aModelIndexMid=_aModelIndexMid.ptr(0);
    uchar* m_aModelIndexShort=_aModelIndexShort.ptr(0);

    //some constants
    int m_nN=_nN;
    float m_fAlphaT=_fAlphaT;
    float m_fTb=_fTb;//Tb - threshold on the distance
    float m_fTau=_fTau;
    int m_nkNN=_nkNN;
    int m_bShadowDetection=_bShadowDetection;

    //recalculate update rates - in case alpha is changed
    // calculate update parameters (using alpha)
    int Kshort,Kmid,Klong;
    //approximate exponential learning curve
    Kshort=(int)(log(0.7)/log(1-m_fAlphaT))+1;//Kshort
    Kmid=(int)(log(0.4)/log(1-m_fAlphaT))-Kshort+1;//Kmid
    Klong=(int)(log(0.1)/log(1-m_fAlphaT))-Kshort-Kmid+1;//Klong

    //refresh rates
    int m_nShortUpdate = (Kshort/m_nN)+1;
    int m_nMidUpdate = (Kmid/m_nN)+1;
    int m_nLongUpdate = (Klong/m_nN)+1;
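    // Rough numbers for the defaults (alpha = 1/500, nN = 7): Kshort ~ 179, Kmid ~ 279,
    // Klong ~ 693, giving update intervals of roughly 26, 40 and 100 frames for the
    // short, mid and long lists respectively.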

    //int m_nShortUpdate = MAX((Kshort/m_nN),m_nN);
    //int m_nMidUpdate = MAX((Kmid/m_nN),m_nN);
    //int m_nLongUpdate = MAX((Klong/m_nN),m_nN);

    //update counters for the refresh rate
    int m_nLongCounter=_nLongCounter;
    int m_nMidCounter=_nMidCounter;
    int m_nShortCounter=_nShortCounter;

    _nShortCounter++;//0,1,...,m_nShortUpdate-1
    _nMidCounter++;
    _nLongCounter++;
    if (_nShortCounter >= m_nShortUpdate) _nShortCounter = 0;
    if (_nMidCounter >= m_nMidUpdate) _nMidCounter = 0;
    if (_nLongCounter >= m_nLongUpdate) _nLongCounter = 0;

    //go through the image
    long i = 0;
    for (long y = 0; y < _src.rows; y++)
    {
        for (long x = 0; x < _src.cols; x++)
        {
            const uchar* data = _src.ptr((int)y, (int)x);

            //update model + background subtract
            uchar include=0;
            int result= _cvCheckPixelBackgroundNP(i, data, nchannels,
                    m_nN, m_aModel, m_fTb,m_nkNN, m_fTau,m_bShadowDetection,include);

            _cvUpdatePixelBackgroundNP(i,data,nchannels,
                    m_nN, m_aModel,
                    m_nNextLongUpdate,
                    m_nNextMidUpdate,
                    m_nNextShortUpdate,
                    m_aModelIndexLong,
                    m_aModelIndexMid,
                    m_aModelIndexShort,
                    m_nLongCounter,
                    m_nMidCounter,
                    m_nShortCounter,
                    m_nLongUpdate,
                    m_nMidUpdate,
                    m_nShortUpdate,
                    include
                    );
            switch (result)
            {
                case 0:
                    //foreground
                    *_dst.ptr((int)y, (int)x) = 255;
                    break;
                case 1:
                    //background
                    *_dst.ptr((int)y, (int)x) = 0;
                    break;
                case 2:
                    //shadow
                    *_dst.ptr((int)y, (int)x) = nShadowDetection;
                    break;
            }
            i++;
        }
    }
}


void BackgroundSubtractorKNNImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
{
    Mat image = _image.getMat();
    bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;

    if( needToInitialize )
        initialize(image.size(), image.type());

    _fgmask.create( image.size(), CV_8U );
    Mat fgmask = _fgmask.getMat();

    ++nframes;
    learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history );
    CV_Assert(learningRate >= 0);

    //parallel_for_(Range(0, image.rows),
    //              KNNInvoker(image, fgmask,
    icvUpdatePixelBackgroundNP(image, fgmask,
            bgmodel,
            nNextLongUpdate,
            nNextMidUpdate,
            nNextShortUpdate,
            aModelIndexLong,
            aModelIndexMid,
            aModelIndexShort,
            nLongCounter,
            nMidCounter,
            nShortCounter,
            nN,
            (float)learningRate,
            fTb,
            nkNN,
            fTau,
            bShadowDetection,
            nShadowDetection
            );
}

void BackgroundSubtractorKNNImpl::getBackgroundImage(OutputArray backgroundImage) const
{
    int nchannels = CV_MAT_CN(frameType);
    //CV_Assert( nchannels == 3 );
    Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0));

    int ndata=nchannels+1;
    int modelstep=(ndata * nN * 3);

    const uchar* pbgmodel=bgmodel.ptr(0);
    for(int row=0; row<meanBackground.rows; row++)
    {
        for(int col=0; col<meanBackground.cols; col++)
        {
            for (int n = 0; n < nN*3; n++)
            {
                const uchar* mean_m = &pbgmodel[n*ndata];
                if (mean_m[nchannels])
                {
                    meanBackground.at<Vec3b>(row, col) = Vec3b(mean_m);
                    break;
                }
            }
            pbgmodel=pbgmodel+modelstep;
        }
    }

    switch(CV_MAT_CN(frameType))
    {
        case 1:
        {
            std::vector<Mat> channels;
            split(meanBackground, channels);
            channels[0].copyTo(backgroundImage);
            break;
        }
        case 3:
        {
            meanBackground.copyTo(backgroundImage);
            break;
        }
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
    }
}


Ptr<BackgroundSubtractorKNN> createBackgroundSubtractorKNN(int _history, double _threshold2,
                                                           bool _bShadowDetection)
{
    return makePtr<BackgroundSubtractorKNNImpl>(_history, (float)_threshold2, _bShadowDetection);
}
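
/* Illustrative usage sketch (not part of the library build; assumes the videoio and
   highgui modules are available):

       Ptr<BackgroundSubtractorKNN> knn = createBackgroundSubtractorKNN(500, 400.0, true);
       VideoCapture cap(0);
       Mat frame, fgMask;
       while (cap.read(frame))
       {
           knn->apply(frame, fgMask);   // learningRate < 0 selects the automatic rate
           imshow("foreground mask", fgMask);
           if (waitKey(30) >= 0)
               break;
       }
*/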

}

/* End of file. */