/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
#include "opencv2/opencv_modules.hpp"
#include "clp.hpp"

#include "opencv2/core/private.cuda.hpp"

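// cuda::compactPoints() is a small host-side helper used by the CUDA keypoint
// estimator below: it packs the correspondences whose mask byte is non-zero to
// the front of the 1xN point rows on the device and trims both GpuMats to the
// surviving columns. If the CUDA modules are present but CUDA itself is
// disabled, a stub that throws is compiled instead.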
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
    #if !defined HAVE_CUDA || defined(CUDA_DISABLER)
        namespace cv { namespace cuda {
            static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
        }}
    #else
        namespace cv { namespace cuda { namespace device { namespace globmotion {
            int compactPoints(int N, float *points0, float *points1, const uchar *mask);
        }}}}
        namespace cv { namespace cuda {
            static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
            {
                CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
                CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
                CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);

                int npoints = points0.cols;
                int remaining = cv::cuda::device::globmotion::compactPoints(
                        npoints, (float*)points0.data, (float*)points1.data, mask.data);

                points0 = points0.colRange(0, remaining);
                points1 = points1.colRange(0, remaining);
            }
        }}
    #endif
#endif

namespace cv
{
namespace videostab
{

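// The estimateGlobMotionLeastSquares* helpers below each fit one 2D motion model
// to the correspondences points0[i] -> points1[i] and return it as a 3x3 CV_32F
// matrix with last row (0, 0, 1). When a non-null rmse pointer is supplied they
// also report the root-mean-square reprojection error of the fit.
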
// does isotropic normalization: shifts the centroid of the points to the origin,
// scales them so the average distance from the origin is sqrt(2), and returns
// the 3x3 transform that was applied
static Mat normalizePoints(int npoints, Point2f *points)
{
    float cx = 0.f, cy = 0.f;
    for (int i = 0; i < npoints; ++i)
    {
        cx += points[i].x;
        cy += points[i].y;
    }
    cx /= npoints;
    cy /= npoints;

    float d = 0.f;
    for (int i = 0; i < npoints; ++i)
    {
        points[i].x -= cx;
        points[i].y -= cy;
        d += std::sqrt(sqr(points[i].x) + sqr(points[i].y));
    }
    d /= npoints;

    float s = std::sqrt(2.f) / d;
    for (int i = 0; i < npoints; ++i)
    {
        points[i].x *= s;
        points[i].y *= s;
    }

    Mat_<float> T = Mat::eye(3, 3, CV_32F);
    T(0,0) = T(1,1) = s;
    T(0,2) = -cx*s;
    T(1,2) = -cy*s;
    return T;
}


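// Translation-only model: the least-squares optimum is just the mean
// displacement between the two point sets, stored in M(0,2) and M(1,2).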
static Mat estimateGlobMotionLeastSquaresTranslation(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    for (int i = 0; i < npoints; ++i)
    {
        M(0,2) += points1[i].x - points0[i].x;
        M(1,2) += points1[i].y - points0[i].y;
    }
    M(0,2) /= npoints;
    M(1,2) /= npoints;

    if (rmse)
    {
        *rmse = 0;
        for (int i = 0; i < npoints; ++i)
            *rmse += sqr(points1[i].x - points0[i].x - M(0,2)) +
                     sqr(points1[i].y - points0[i].y - M(1,2));
        *rmse = std::sqrt(*rmse / npoints);
    }

    return M;
}


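// Translation + uniform scale. Each correspondence contributes two linear
// equations in the unknowns (s, tx, ty):
//     s*x0 + tx = x1
//     s*y0 + ty = y1
// The system is solved via the normal equations on isotropically normalized
// points, and the result is mapped back to pixel coordinates as T1^-1 * M * T0.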
static Mat estimateGlobMotionLeastSquaresTranslationAndScale(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> T0 = normalizePoints(npoints, points0);
    Mat_<float> T1 = normalizePoints(npoints, points1);

    Mat_<float> A(2*npoints, 3), b(2*npoints, 1);
    float *a0, *a1;
    Point2f p0, p1;

    for (int i = 0; i < npoints; ++i)
    {
        a0 = A[2*i];
        a1 = A[2*i+1];
        p0 = points0[i];
        p1 = points1[i];
        a0[0] = p0.x; a0[1] = 1; a0[2] = 0;
        a1[0] = p0.y; a1[1] = 0; a1[2] = 1;
        b(2*i,0) = p1.x;
        b(2*i+1,0) = p1.y;
    }

    Mat_<float> sol;
    solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);

    if (rmse)
        *rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));

    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    M(0,0) = M(1,1) = sol(0,0);
    M(0,2) = sol(1,0);
    M(1,2) = sol(2,0);

    return T1.inv() * M * T0;
}

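// Pure in-plane rotation about the origin. With A = sum(p0 . p1) (dot products)
// and B = sum(p0 x p1) (cross products), the closed-form least-squares rotation
// has cos(alpha) = A/C and sin(alpha) = -B/C, where C = sqrt(A*A + B*B); a
// degenerate C == 0 leaves the identity matrix untouched.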
static Mat estimateGlobMotionLeastSquaresRotation(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Point2f p0, p1;
    float A(0), B(0);
    for(int i=0; i<npoints; ++i)
    {
        p0 = points0[i];
        p1 = points1[i];

        A += p0.x*p1.x + p0.y*p1.y;
        B += p0.x*p1.y - p1.x*p0.y;
    }

    // A*sin(alpha) + B*cos(alpha) = 0
    float C = std::sqrt(A*A + B*B);
    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    if ( C != 0 )
    {
        float sinAlpha = - B / C;
        float cosAlpha = A / C;

        M(0,0) = cosAlpha;
        M(1,1) = M(0,0);
        M(0,1) = sinAlpha;
        M(1,0) = - M(0,1);
    }

    if (rmse)
    {
        *rmse = 0;
        for (int i = 0; i < npoints; ++i)
        {
            p0 = points0[i];
            p1 = points1[i];
            *rmse += sqr(p1.x - M(0,0)*p0.x - M(0,1)*p0.y) +
                     sqr(p1.y - M(1,0)*p0.x - M(1,1)*p0.y);
        }
        *rmse = std::sqrt(*rmse / npoints);
    }

    return M;
}

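// Rigid model (rotation + translation), essentially the 2D orthogonal Procrustes
// (Kabsch) construction: both point sets are centered on their means, the 2x2
// cross-covariance A = sum(pt1 * pt0^T) is factored with SVD, the rotation is
// taken as R = U * Vt, and the translation maps the centroid of points0 onto
// the centroid of points1.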
static Mat estimateGlobMotionLeastSquaresRigid(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Point2f mean0(0.f, 0.f);
    Point2f mean1(0.f, 0.f);

    for (int i = 0; i < npoints; ++i)
    {
        mean0 += points0[i];
        mean1 += points1[i];
    }

    mean0 *= 1.f / npoints;
    mean1 *= 1.f / npoints;

    Mat_<float> A = Mat::zeros(2, 2, CV_32F);
    Point2f pt0, pt1;

    for (int i = 0; i < npoints; ++i)
    {
        pt0 = points0[i] - mean0;
        pt1 = points1[i] - mean1;
        A(0,0) += pt1.x * pt0.x;
        A(0,1) += pt1.x * pt0.y;
        A(1,0) += pt1.y * pt0.x;
        A(1,1) += pt1.y * pt0.y;
    }

    Mat_<float> M = Mat::eye(3, 3, CV_32F);

    SVD svd(A);
    Mat_<float> R = svd.u * svd.vt;
    Mat tmp(M(Rect(0,0,2,2)));
    R.copyTo(tmp);

    M(0,2) = mean1.x - R(0,0)*mean0.x - R(0,1)*mean0.y;
    M(1,2) = mean1.y - R(1,0)*mean0.x - R(1,1)*mean0.y;

    if (rmse)
    {
        *rmse = 0;
        for (int i = 0; i < npoints; ++i)
        {
            pt0 = points0[i];
            pt1 = points1[i];
            *rmse += sqr(pt1.x - M(0,0)*pt0.x - M(0,1)*pt0.y - M(0,2)) +
                     sqr(pt1.y - M(1,0)*pt0.x - M(1,1)*pt0.y - M(1,2));
        }
        *rmse = std::sqrt(*rmse / npoints);
    }

    return M;
}


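// Similarity model (uniform scale + rotation + translation). The four unknowns
// (a, b, tx, ty) parameterize
//     x1 =  a*x0 + b*y0 + tx
//     y1 = -b*x0 + a*y0 + ty
// giving two linear equations per correspondence; as above, the system is solved
// on normalized points and denormalized afterwards.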
static Mat estimateGlobMotionLeastSquaresSimilarity(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> T0 = normalizePoints(npoints, points0);
    Mat_<float> T1 = normalizePoints(npoints, points1);

    Mat_<float> A(2*npoints, 4), b(2*npoints, 1);
    float *a0, *a1;
    Point2f p0, p1;

    for (int i = 0; i < npoints; ++i)
    {
        a0 = A[2*i];
        a1 = A[2*i+1];
        p0 = points0[i];
        p1 = points1[i];
        a0[0] = p0.x; a0[1] = p0.y; a0[2] = 1; a0[3] = 0;
        a1[0] = p0.y; a1[1] = -p0.x; a1[2] = 0; a1[3] = 1;
        b(2*i,0) = p1.x;
        b(2*i+1,0) = p1.y;
    }

    Mat_<float> sol;
    solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);

    if (rmse)
        *rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));

    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    M(0,0) = M(1,1) = sol(0,0);
    M(0,1) = sol(1,0);
    M(1,0) = -sol(1,0);
    M(0,2) = sol(2,0);
    M(1,2) = sol(3,0);

    return T1.inv() * M * T0;
}


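// Full six-parameter affine model: each correspondence yields one equation for
// the first row of M and one for the second row, solved on normalized points
// via the normal equations like the other over-determined fits above.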
static Mat estimateGlobMotionLeastSquaresAffine(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> T0 = normalizePoints(npoints, points0);
    Mat_<float> T1 = normalizePoints(npoints, points1);

    Mat_<float> A(2*npoints, 6), b(2*npoints, 1);
    float *a0, *a1;
    Point2f p0, p1;

    for (int i = 0; i < npoints; ++i)
    {
        a0 = A[2*i];
        a1 = A[2*i+1];
        p0 = points0[i];
        p1 = points1[i];
        a0[0] = p0.x; a0[1] = p0.y; a0[2] = 1; a0[3] = a0[4] = a0[5] = 0;
        a1[0] = a1[1] = a1[2] = 0; a1[3] = p0.x; a1[4] = p0.y; a1[5] = 1;
        b(2*i,0) = p1.x;
        b(2*i+1,0) = p1.y;
    }

    Mat_<float> sol;
    solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);

    if (rmse)
        *rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));

    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    for (int i = 0, k = 0; i < 2; ++i)
        for (int j = 0; j < 3; ++j, ++k)
            M(i,j) = sol(k,0);

    return T1.inv() * M * T0;
}


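// Dispatcher over the fitters above: the MM_TRANSLATION .. MM_AFFINE model ids
// index the impls[] table in order. The arrays are InputOutputArray because
// several fitters normalize the supplied points in place.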
Mat estimateGlobalMotionLeastSquares(
        InputOutputArray points0, InputOutputArray points1, int model, float *rmse)
{
    CV_Assert(model <= MM_AFFINE);
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    typedef Mat (*Impl)(int, Point2f*, Point2f*, float*);
    static Impl impls[] = { estimateGlobMotionLeastSquaresTranslation,
                            estimateGlobMotionLeastSquaresTranslationAndScale,
                            estimateGlobMotionLeastSquaresRotation,
                            estimateGlobMotionLeastSquaresRigid,
                            estimateGlobMotionLeastSquaresSimilarity,
                            estimateGlobMotionLeastSquaresAffine };

    Point2f *points0_ = points0.getMat().ptr<Point2f>();
    Point2f *points1_ = points1.getMat().ptr<Point2f>();

    return impls[model](npoints, points0_, points1_, rmse);
}


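// RANSAC wrapper around estimateGlobalMotionLeastSquares(): every iteration
// samples params.size distinct correspondences, fits a hypothesis to them, and
// counts the points whose reprojection error stays below params.thresh. The
// winning hypothesis is refined by refitting on all of its inliers (or, when
// too few inliers were collected, on the best minimal subset), and that final
// fit supplies the reported RMSE; *ninliers receives the hypothesis inlier count.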
Mat estimateGlobalMotionRansac(
        InputArray points0, InputArray points1, int model, const RansacParams &params,
        float *rmse, int *ninliers)
{
    CV_Assert(model <= MM_AFFINE);
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    if (npoints < params.size)
        return Mat::eye(3, 3, CV_32F);

    const Point2f *points0_ = points0.getMat().ptr<Point2f>();
    const Point2f *points1_ = points1.getMat().ptr<Point2f>();
    const int niters = params.niters();

    // current hypothesis
    std::vector<int> indices(params.size);
    std::vector<Point2f> subset0(params.size);
    std::vector<Point2f> subset1(params.size);

    // best hypothesis
    std::vector<Point2f> subset0best(params.size);
    std::vector<Point2f> subset1best(params.size);
    Mat_<float> bestM;
    int ninliersMax = -1;

    RNG rng(0);
    Point2f p0, p1;
    float x, y;

    for (int iter = 0; iter < niters; ++iter)
    {
        for (int i = 0; i < params.size; ++i)
        {
            bool ok = false;
            while (!ok)
            {
                ok = true;
                indices[i] = static_cast<unsigned>(rng) % npoints;
                for (int j = 0; j < i; ++j)
                    if (indices[i] == indices[j])
                        { ok = false; break; }
            }
        }
        for (int i = 0; i < params.size; ++i)
        {
            subset0[i] = points0_[indices[i]];
            subset1[i] = points1_[indices[i]];
        }

        Mat_<float> M = estimateGlobalMotionLeastSquares(subset0, subset1, model, 0);

        int numinliers = 0;
        for (int i = 0; i < npoints; ++i)
        {
            p0 = points0_[i];
            p1 = points1_[i];
            x = M(0,0)*p0.x + M(0,1)*p0.y + M(0,2);
            y = M(1,0)*p0.x + M(1,1)*p0.y + M(1,2);
            if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
                numinliers++;
        }
        if (numinliers >= ninliersMax)
        {
            bestM = M;
            ninliersMax = numinliers;
            subset0best.swap(subset0);
            subset1best.swap(subset1);
        }
    }

    if (ninliersMax < params.size)
        // compute RMSE
        bestM = estimateGlobalMotionLeastSquares(subset0best, subset1best, model, rmse);
    else
    {
        subset0.resize(ninliersMax);
        subset1.resize(ninliersMax);
        for (int i = 0, j = 0; i < npoints && j < ninliersMax; ++i)
        {
            p0 = points0_[i];
            p1 = points1_[i];
            x = bestM(0,0)*p0.x + bestM(0,1)*p0.y + bestM(0,2);
            y = bestM(1,0)*p0.x + bestM(1,1)*p0.y + bestM(1,2);
            if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
            {
                subset0[j] = p0;
                subset1[j] = p1;
                j++;
            }
        }
        bestM = estimateGlobalMotionLeastSquares(subset0, subset1, model, rmse);
    }

    if (ninliers)
        *ninliers = ninliersMax;

    return bestM;
}


MotionEstimatorRansacL2::MotionEstimatorRansacL2(MotionModel model)
    : MotionEstimatorBase(model)
{
    setRansacParams(RansacParams::default2dMotion(model));
    setMinInlierRatio(0.1f);
}


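// Uses the RANSAC routine above for every model up to MM_AFFINE; MM_HOMOGRAPHY
// is handled by findHomography() with LMEDS instead. If the resulting inlier
// ratio falls below minInlierRatio_, the estimate is treated as unreliable and
// the identity transform is returned with *ok set to false.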
Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bool *ok)
{
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    // find motion

    int ninliers = 0;
    Mat_<float> M;

    if (motionModel() != MM_HOMOGRAPHY)
        M = estimateGlobalMotionRansac(
                points0, points1, motionModel(), ransacParams_, 0, &ninliers);
    else
    {
        std::vector<uchar> mask;
        M = findHomography(points0, points1, mask, LMEDS);
        for (int i = 0; i < npoints; ++i)
            if (mask[i]) ninliers++;
    }

    // check if we're confident enough in estimated motion

    if (ok) *ok = true;
    if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
    {
        M = Mat::eye(3, 3, CV_32F);
        if (ok) *ok = false;
    }

    return M;
}


MotionEstimatorL1::MotionEstimatorL1(MotionModel model)
    : MotionEstimatorBase(model)
{
}


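// L1 motion estimation posed as a linear program (requires Clp). The first six
// columns are the affine parameters; every correspondence adds two non-negative
// slack columns and four inequality rows that force each slack to bound the
// absolute x/y reprojection residual, so minimizing the sum of slacks minimizes
// the total L1 error. Additional equality rows then restrict the affine matrix
// to the requested model (similarity, translation+scale, or pure translation),
// and the problem is solved with Clp's dual simplex method.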
// TODO will estimation of all motions as one LP problem be faster?
Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok)
{
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

#ifndef HAVE_CLP

    CV_Error(Error::StsError, "The library is built without Clp support");
    if (ok) *ok = false;
    return Mat::eye(3, 3, CV_32F);

#else

    CV_Assert(motionModel() <= MM_AFFINE && motionModel() != MM_RIGID);

    if (npoints <= 0)
        return Mat::eye(3, 3, CV_32F);

    // prepare LP problem

    const Point2f *points0_ = points0.getMat().ptr<Point2f>();
    const Point2f *points1_ = points1.getMat().ptr<Point2f>();

    int ncols = 6 + 2*npoints;
    int nrows = 4*npoints;

    if (motionModel() == MM_SIMILARITY)
        nrows += 2;
    else if (motionModel() == MM_TRANSLATION_AND_SCALE)
        nrows += 3;
    else if (motionModel() == MM_TRANSLATION)
        nrows += 4;

    rows_.clear();
    cols_.clear();
    elems_.clear();
    obj_.assign(ncols, 0);
    collb_.assign(ncols, -INF);
    colub_.assign(ncols, INF);

    int c = 6;

    for (int i = 0; i < npoints; ++i, c += 2)
    {
        obj_[c] = 1;
        collb_[c] = 0;

        obj_[c+1] = 1;
        collb_[c+1] = 0;
    }

    elems_.clear();
    rowlb_.assign(nrows, -INF);
    rowub_.assign(nrows, INF);

    int r = 0;
    Point2f p0, p1;

    for (int i = 0; i < npoints; ++i, r += 4)
    {
        p0 = points0_[i];
        p1 = points1_[i];

        set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
        rowub_[r] = p1.x;

        set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
        rowub_[r+1] = p1.y;

        set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
        rowlb_[r+2] = p1.x;

        set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
        rowlb_[r+3] = p1.y;
    }

    if (motionModel() == MM_SIMILARITY)
    {
        set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
        set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
    }
    else if (motionModel() == MM_TRANSLATION_AND_SCALE)
    {
        set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
        set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
        set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
    }
    else if (motionModel() == MM_TRANSLATION)
    {
        set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
        set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
        set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
        set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
    }

    // solve

    CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
    A.setDimensions(nrows, ncols);

    ClpSimplex model(false);
    model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);

    ClpDualRowSteepest dualSteep(1);
    model.setDualRowPivotAlgorithm(dualSteep);
    model.scaling(1);

    model.dual();

    // extract motion

    const double *sol = model.getColSolution();

    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    M(0,0) = sol[0];
    M(0,1) = sol[1];
    M(0,2) = sol[2];
    M(1,0) = sol[3];
    M(1,1) = sol[4];
    M(1,2) = sol[5];

    if (ok) *ok = true;
    return M;
#endif
}


FromFileMotionReader::FromFileMotionReader(const String &path)
    : ImageMotionEstimatorBase(MM_UNKNOWN)
{
    file_.open(path.c_str());
    CV_Assert(file_.is_open());
}


Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, bool *ok)
{
    Mat_<float> M(3, 3);
    bool ok_;
    file_ >> M(0,0) >> M(0,1) >> M(0,2)
          >> M(1,0) >> M(1,1) >> M(1,2)
          >> M(2,0) >> M(2,1) >> M(2,2) >> ok_;
    if (ok) *ok = ok_;
    return M;
}


ToFileMotionWriter::ToFileMotionWriter(const String &path, Ptr<ImageMotionEstimatorBase> estimator)
    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
    file_.open(path.c_str());
    CV_Assert(file_.is_open());
}


Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
    bool ok_;
    Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);
    file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
          << M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
          << M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << std::endl;
    if (ok) *ok = ok_;
    return M;
}


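// Image-based estimator: detects keypoints in frame0 (GFTT by default), tracks
// them into frame1 with sparse pyramidal Lucas-Kanade optical flow, optionally
// filters the matches through the configured outlier rejector, and hands the
// surviving correspondences to the underlying 2D motion estimator.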
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
    setDetector(GFTTDetector::create());
    setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
    setOutlierRejector(makePtr<NullOutlierRejector>());
}


Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
    // find keypoints
    detector_->detect(frame0, keypointsPrev_);
    if (keypointsPrev_.empty())
        return Mat::eye(3, 3, CV_32F);

    // extract points from keypoints
    pointsPrev_.resize(keypointsPrev_.size());
    for (size_t i = 0; i < keypointsPrev_.size(); ++i)
        pointsPrev_[i] = keypointsPrev_[i].pt;

    // find correspondences
    optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());

    // leave good correspondences only

    pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
    pointsGood_.clear(); pointsGood_.reserve(points_.size());

    for (size_t i = 0; i < points_.size(); ++i)
    {
        if (status_[i])
        {
            pointsPrevGood_.push_back(pointsPrev_[i]);
            pointsGood_.push_back(points_[i]);
        }
    }

    // perform outlier rejection

    IOutlierRejector *outlRejector = outlierRejector_.get();
    if (!dynamic_cast<NullOutlierRejector*>(outlRejector))
    {
        pointsPrev_.swap(pointsPrevGood_);
        points_.swap(pointsGood_);

        outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);

        pointsPrevGood_.clear();
        pointsPrevGood_.reserve(points_.size());

        pointsGood_.clear();
        pointsGood_.reserve(points_.size());

        for (size_t i = 0; i < points_.size(); ++i)
        {
            if (status_[i])
            {
                pointsPrevGood_.push_back(pointsPrev_[i]);
                pointsGood_.push_back(points_[i]);
            }
        }
    }

    // estimate motion
    return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok);
}

#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)

KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator)
    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
    detector_ = cuda::createGoodFeaturesToTrackDetector(CV_8UC1);

    CV_Assert(cuda::getCudaEnabledDeviceCount() > 0);
    setOutlierRejector(makePtr<NullOutlierRejector>());
}


Mat KeypointBasedMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
    frame0_.upload(frame0);
    frame1_.upload(frame1);
    return estimate(frame0_, frame1_, ok);
}


Mat KeypointBasedMotionEstimatorGpu::estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok)
{
    // convert frame to gray if it's color

    cuda::GpuMat grayFrame0;
    if (frame0.channels() == 1)
        grayFrame0 = frame0;
    else
    {
        cuda::cvtColor(frame0, grayFrame0_, COLOR_BGR2GRAY);
        grayFrame0 = grayFrame0_;
    }

    // find keypoints
    detector_->detect(grayFrame0, pointsPrev_);

    // find correspondences
    optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);

    // leave good correspondences only
    cuda::compactPoints(pointsPrev_, points_, status_);

    pointsPrev_.download(hostPointsPrev_);
    points_.download(hostPoints_);

    // perform outlier rejection

    IOutlierRejector *rejector = outlierRejector_.get();
    if (!dynamic_cast<NullOutlierRejector*>(rejector))
    {
        outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);

        hostPointsPrevTmp_.clear();
        hostPointsPrevTmp_.reserve(hostPoints_.cols);

        hostPointsTmp_.clear();
        hostPointsTmp_.reserve(hostPoints_.cols);

        for (int i = 0; i < hostPoints_.cols; ++i)
        {
            if (rejectionStatus_[i])
            {
                hostPointsPrevTmp_.push_back(hostPointsPrev_.at<Point2f>(0,i));
                hostPointsTmp_.push_back(hostPoints_.at<Point2f>(0,i));
            }
        }

        hostPointsPrev_ = Mat(1, (int)hostPointsPrevTmp_.size(), CV_32FC2, &hostPointsPrevTmp_[0]);
        hostPoints_ = Mat(1, (int)hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
    }

    // estimate motion
    return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok);
}

#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)


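// Accumulates per-frame motions to get the motion from frame `from` to frame
// `to`: the chained product when to > from, and the inverse of the chained
// product when from > to. at() is the bounds-safe accessor from ring_buffer.hpp.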
Mat getMotion(int from, int to, const std::vector<Mat> &motions)
{
    Mat M = Mat::eye(3, 3, CV_32F);
    if (to > from)
    {
        for (int i = from; i < to; ++i)
            M = at(i, motions) * M;
    }
    else if (from > to)
    {
        for (int i = to; i < from; ++i)
            M = at(i, motions) * M;
        M = M.inv();
    }
    return M;
}

} // namespace videostab
} // namespace cv