      1 /*M///////////////////////////////////////////////////////////////////////////////////////
      2 //
      3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
      4 //
      5 //  By downloading, copying, installing or using the software you agree to this license.
      6 //  If you do not agree to this license, do not download, install,
      7 //  copy or use the software.
      8 //
      9 //
     10 //                           License Agreement
     11 //                For Open Source Computer Vision Library
     12 //
     13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
     14 // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
     15 // Third party copyrights are property of their respective owners.
     16 //
     17 // Redistribution and use in source and binary forms, with or without modification,
     18 // are permitted provided that the following conditions are met:
     19 //
     20 //   * Redistribution's of source code must retain the above copyright notice,
     21 //     this list of conditions and the following disclaimer.
     22 //
     23 //   * Redistribution's in binary form must reproduce the above copyright notice,
     24 //     this list of conditions and the following disclaimer in the documentation
     25 //     and/or other materials provided with the distribution.
     26 //
     27 //   * The name of the copyright holders may not be used to endorse or promote products
     28 //     derived from this software without specific prior written permission.
     29 //
     30 // This software is provided by the copyright holders and contributors "as is" and
     31 // any express or implied warranties, including, but not limited to, the implied
     32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
     33 // In no event shall the Intel Corporation or contributors be liable for any direct,
     34 // indirect, incidental, special, exemplary, or consequential damages
     35 // (including, but not limited to, procurement of substitute goods or services;
     36 // loss of use, data, or profits; or business interruption) however caused
     37 // and on any theory of liability, whether in contract, strict liability,
     38 // or tort (including negligence or otherwise) arising in any way out of
     39 // the use of this software, even if advised of the possibility of such damage.
     40 //
     41 //M*/
     42 
     43 #include "precomp.hpp"
     44 #include "fisheye.hpp"
     45 
     46 namespace cv { namespace
     47 {
     48     struct JacobianRow
     49     {
     50         Vec2d df, dc;
     51         Vec4d dk;
     52         Vec3d dom, dT;
     53         double dalpha;
     54     };
     55 
     56     void subMatrix(const Mat& src, Mat& dst, const std::vector<int>& cols, const std::vector<int>& rows);
     57 }}
     58 
     59 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
     60 /// cv::fisheye::projectPoints
     61 
     62 void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
     63     InputArray K, InputArray D, double alpha, OutputArray jacobian)
     64 {
     65     projectPoints(objectPoints, imagePoints, affine.rvec(), affine.translation(), K, D, alpha, jacobian);
     66 }
     67 
     68 void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray _rvec,
     69         InputArray _tvec, InputArray _K, InputArray _D, double alpha, OutputArray jacobian)
     70 {
      71     // only 3-channel point data is supported for now
     72     CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
     73     imagePoints.create(objectPoints.size(), CV_MAKETYPE(objectPoints.depth(), 2));
     74     size_t n = objectPoints.total();
     75 
     76     CV_Assert(_rvec.total() * _rvec.channels() == 3 && (_rvec.depth() == CV_32F || _rvec.depth() == CV_64F));
     77     CV_Assert(_tvec.total() * _tvec.channels() == 3 && (_tvec.depth() == CV_32F || _tvec.depth() == CV_64F));
     78     CV_Assert(_tvec.getMat().isContinuous() && _rvec.getMat().isContinuous());
     79 
     80     Vec3d om = _rvec.depth() == CV_32F ? (Vec3d)*_rvec.getMat().ptr<Vec3f>() : *_rvec.getMat().ptr<Vec3d>();
     81     Vec3d T  = _tvec.depth() == CV_32F ? (Vec3d)*_tvec.getMat().ptr<Vec3f>() : *_tvec.getMat().ptr<Vec3d>();
     82 
     83     CV_Assert(_K.size() == Size(3,3) && (_K.type() == CV_32F || _K.type() == CV_64F) && _D.type() == _K.type() && _D.total() == 4);
     84 
     85     cv::Vec2d f, c;
     86     if (_K.depth() == CV_32F)
     87     {
     88 
     89         Matx33f K = _K.getMat();
     90         f = Vec2f(K(0, 0), K(1, 1));
     91         c = Vec2f(K(0, 2), K(1, 2));
     92     }
     93     else
     94     {
     95         Matx33d K = _K.getMat();
     96         f = Vec2d(K(0, 0), K(1, 1));
     97         c = Vec2d(K(0, 2), K(1, 2));
     98     }
     99 
    100     Vec4d k = _D.depth() == CV_32F ? (Vec4d)*_D.getMat().ptr<Vec4f>(): *_D.getMat().ptr<Vec4d>();
    101 
    102     JacobianRow *Jn = 0;
    103     if (jacobian.needed())
    104     {
    105         int nvars = 2 + 2 + 1 + 4 + 3 + 3; // f, c, alpha, k, om, T,
    106         jacobian.create(2*(int)n, nvars, CV_64F);
    107         Jn = jacobian.getMat().ptr<JacobianRow>(0);
    108     }
    109 
    110     Matx33d R;
    111     Matx<double, 3, 9> dRdom;
    112     Rodrigues(om, R, dRdom);
    113     Affine3d aff(om, T);
    114 
    115     const Vec3f* Xf = objectPoints.getMat().ptr<Vec3f>();
    116     const Vec3d* Xd = objectPoints.getMat().ptr<Vec3d>();
    117     Vec2f *xpf = imagePoints.getMat().ptr<Vec2f>();
    118     Vec2d *xpd = imagePoints.getMat().ptr<Vec2d>();
    119 
    120     for(size_t i = 0; i < n; ++i)
    121     {
    122         Vec3d Xi = objectPoints.depth() == CV_32F ? (Vec3d)Xf[i] : Xd[i];
    123         Vec3d Y = aff*Xi;
    124 
    125         Vec2d x(Y[0]/Y[2], Y[1]/Y[2]);
    126 
    127         double r2 = x.dot(x);
    128         double r = std::sqrt(r2);
    129 
    130         // Angle of the incoming ray:
    131         double theta = atan(r);
    132 
    133         double theta2 = theta*theta, theta3 = theta2*theta, theta4 = theta2*theta2, theta5 = theta4*theta,
    134                 theta6 = theta3*theta3, theta7 = theta6*theta, theta8 = theta4*theta4, theta9 = theta8*theta;
    135 
    136         double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9;
    137 
    138         double inv_r = r > 1e-8 ? 1.0/r : 1;
    139         double cdist = r > 1e-8 ? theta_d * inv_r : 1;
    140 
    141         Vec2d xd1 = x * cdist;
    142         Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]);
    143         Vec2d final_point(xd3[0] * f[0] + c[0], xd3[1] * f[1] + c[1]);
    144 
    145         if (objectPoints.depth() == CV_32F)
    146             xpf[i] = final_point;
    147         else
    148             xpd[i] = final_point;
    149 
    150         if (jacobian.needed())
    151         {
    152             //Vec3d Xi = pdepth == CV_32F ? (Vec3d)Xf[i] : Xd[i];
    153             //Vec3d Y = aff*Xi;
    154             double dYdR[] = { Xi[0], Xi[1], Xi[2], 0, 0, 0, 0, 0, 0,
    155                               0, 0, 0, Xi[0], Xi[1], Xi[2], 0, 0, 0,
    156                               0, 0, 0, 0, 0, 0, Xi[0], Xi[1], Xi[2] };
    157 
    158             Matx33d dYdom_data = Matx<double, 3, 9>(dYdR) * dRdom.t();
    159             const Vec3d *dYdom = (Vec3d*)dYdom_data.val;
    160 
    161             Matx33d dYdT_data = Matx33d::eye();
    162             const Vec3d *dYdT = (Vec3d*)dYdT_data.val;
    163 
    164             //Vec2d x(Y[0]/Y[2], Y[1]/Y[2]);
    165             Vec3d dxdom[2];
    166             dxdom[0] = (1.0/Y[2]) * dYdom[0] - x[0]/Y[2] * dYdom[2];
    167             dxdom[1] = (1.0/Y[2]) * dYdom[1] - x[1]/Y[2] * dYdom[2];
    168 
    169             Vec3d dxdT[2];
    170             dxdT[0]  = (1.0/Y[2]) * dYdT[0] - x[0]/Y[2] * dYdT[2];
    171             dxdT[1]  = (1.0/Y[2]) * dYdT[1] - x[1]/Y[2] * dYdT[2];
    172 
    173             //double r2 = x.dot(x);
    174             Vec3d dr2dom = 2 * x[0] * dxdom[0] + 2 * x[1] * dxdom[1];
    175             Vec3d dr2dT  = 2 * x[0] *  dxdT[0] + 2 * x[1] *  dxdT[1];
    176 
    177             //double r = std::sqrt(r2);
    178             double drdr2 = r > 1e-8 ? 1.0/(2*r) : 1;
    179             Vec3d drdom = drdr2 * dr2dom;
    180             Vec3d drdT  = drdr2 * dr2dT;
    181 
    182             // Angle of the incoming ray:
    183             //double theta = atan(r);
    184             double dthetadr = 1.0/(1+r2);
    185             Vec3d dthetadom = dthetadr * drdom;
    186             Vec3d dthetadT  = dthetadr *  drdT;
    187 
    188             //double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9;
    189             double dtheta_ddtheta = 1 + 3*k[0]*theta2 + 5*k[1]*theta4 + 7*k[2]*theta6 + 9*k[3]*theta8;
    190             Vec3d dtheta_ddom = dtheta_ddtheta * dthetadom;
    191             Vec3d dtheta_ddT  = dtheta_ddtheta * dthetadT;
    192             Vec4d dtheta_ddk  = Vec4d(theta3, theta5, theta7, theta9);
    193 
    194             //double inv_r = r > 1e-8 ? 1.0/r : 1;
    195             //double cdist = r > 1e-8 ? theta_d / r : 1;
    196             Vec3d dcdistdom = inv_r * (dtheta_ddom - cdist*drdom);
    197             Vec3d dcdistdT  = inv_r * (dtheta_ddT  - cdist*drdT);
    198             Vec4d dcdistdk  = inv_r *  dtheta_ddk;
    199 
    200             //Vec2d xd1 = x * cdist;
    201             Vec4d dxd1dk[2];
    202             Vec3d dxd1dom[2], dxd1dT[2];
    203             dxd1dom[0] = x[0] * dcdistdom + cdist * dxdom[0];
    204             dxd1dom[1] = x[1] * dcdistdom + cdist * dxdom[1];
    205             dxd1dT[0]  = x[0] * dcdistdT  + cdist * dxdT[0];
    206             dxd1dT[1]  = x[1] * dcdistdT  + cdist * dxdT[1];
    207             dxd1dk[0]  = x[0] * dcdistdk;
    208             dxd1dk[1]  = x[1] * dcdistdk;
    209 
    210             //Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]);
    211             Vec4d dxd3dk[2];
    212             Vec3d dxd3dom[2], dxd3dT[2];
    213             dxd3dom[0] = dxd1dom[0] + alpha * dxd1dom[1];
    214             dxd3dom[1] = dxd1dom[1];
    215             dxd3dT[0]  = dxd1dT[0]  + alpha * dxd1dT[1];
    216             dxd3dT[1]  = dxd1dT[1];
    217             dxd3dk[0]  = dxd1dk[0]  + alpha * dxd1dk[1];
    218             dxd3dk[1]  = dxd1dk[1];
    219 
    220             Vec2d dxd3dalpha(xd1[1], 0);
    221 
    222             //final jacobian
    223             Jn[0].dom = f[0] * dxd3dom[0];
    224             Jn[1].dom = f[1] * dxd3dom[1];
    225 
    226             Jn[0].dT = f[0] * dxd3dT[0];
    227             Jn[1].dT = f[1] * dxd3dT[1];
    228 
    229             Jn[0].dk = f[0] * dxd3dk[0];
    230             Jn[1].dk = f[1] * dxd3dk[1];
    231 
    232             Jn[0].dalpha = f[0] * dxd3dalpha[0];
    233             Jn[1].dalpha = 0; //f[1] * dxd3dalpha[1];
    234 
    235             Jn[0].df = Vec2d(xd3[0], 0);
    236             Jn[1].df = Vec2d(0, xd3[1]);
    237 
    238             Jn[0].dc = Vec2d(1, 0);
    239             Jn[1].dc = Vec2d(0, 1);
    240 
    241             //step to jacobian rows for next point
    242             Jn += 2;
    243         }
    244     }
    245 }
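
        // Usage sketch (editor's illustration, not part of the library; assumes
        // <opencv2/calib3d.hpp> is included): projecting a few 3-D points with a
        // made-up camera matrix K and distortion vector D. All values are placeholders.
        //
        //     std::vector<cv::Point3d> obj = { {0.1, -0.2, 1.0}, {0.3, 0.1, 2.0} };
        //     cv::Matx33d K(300, 0, 320,   0, 300, 240,   0, 0, 1);
        //     cv::Vec4d   D(0.02, 0.001, 0.0, 0.0);
        //     std::vector<cv::Point2d> img;
        //     cv::fisheye::projectPoints(obj, img, cv::Vec3d::all(0), cv::Vec3d::all(0), K, D);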
    246 
    247 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    248 /// cv::fisheye::distortPoints
    249 
    250 void cv::fisheye::distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha)
    251 {
    252     // only 2-channel point data is supported for now
    253     CV_Assert(undistorted.type() == CV_32FC2 || undistorted.type() == CV_64FC2);
    254     distorted.create(undistorted.size(), undistorted.type());
    255     size_t n = undistorted.total();
    256 
    257     CV_Assert(K.size() == Size(3,3) && (K.type() == CV_32F || K.type() == CV_64F) && D.total() == 4);
    258 
    259     cv::Vec2d f, c;
    260     if (K.depth() == CV_32F)
    261     {
    262         Matx33f camMat = K.getMat();
    263         f = Vec2f(camMat(0, 0), camMat(1, 1));
    264         c = Vec2f(camMat(0, 2), camMat(1, 2));
    265     }
    266     else
    267     {
    268         Matx33d camMat = K.getMat();
    269         f = Vec2d(camMat(0, 0), camMat(1, 1));
    270         c = Vec2d(camMat(0, 2), camMat(1, 2));
    271     }
    272 
    273     Vec4d k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>();
    274 
    275     const Vec2f* Xf = undistorted.getMat().ptr<Vec2f>();
    276     const Vec2d* Xd = undistorted.getMat().ptr<Vec2d>();
    277     Vec2f *xpf = distorted.getMat().ptr<Vec2f>();
    278     Vec2d *xpd = distorted.getMat().ptr<Vec2d>();
    279 
    280     for(size_t i = 0; i < n; ++i)
    281     {
    282         Vec2d x = undistorted.depth() == CV_32F ? (Vec2d)Xf[i] : Xd[i];
    283 
    284         double r2 = x.dot(x);
    285         double r = std::sqrt(r2);
    286 
    287         // Angle of the incoming ray:
    288         double theta = atan(r);
    289 
    290         double theta2 = theta*theta, theta3 = theta2*theta, theta4 = theta2*theta2, theta5 = theta4*theta,
    291                 theta6 = theta3*theta3, theta7 = theta6*theta, theta8 = theta4*theta4, theta9 = theta8*theta;
    292 
    293         double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9;
    294 
    295         double inv_r = r > 1e-8 ? 1.0/r : 1;
    296         double cdist = r > 1e-8 ? theta_d * inv_r : 1;
    297 
    298         Vec2d xd1 = x * cdist;
    299         Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]);
    300         Vec2d final_point(xd3[0] * f[0] + c[0], xd3[1] * f[1] + c[1]);
    301 
    302         if (undistorted.depth() == CV_32F)
    303             xpf[i] = final_point;
    304         else
    305             xpd[i] = final_point;
    306     }
    307 }
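
        // Usage sketch (editor's illustration): distortPoints expects normalized image
        // coordinates (x = X/Z, y = Y/Z) and returns distorted pixel coordinates.
        // K and D are the same made-up placeholders as in the projectPoints sketch above.
        //
        //     std::vector<cv::Point2d> und = { {0.05, -0.02}, {0.10, 0.08} }; // normalized
        //     std::vector<cv::Point2d> dist;
        //     cv::fisheye::distortPoints(und, dist, K, D);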
    308 
    309 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    310 /// cv::fisheye::undistortPoints
    311 
    312 void cv::fisheye::undistortPoints( InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray R, InputArray P)
    313 {
    314     // only 2-channel point data is supported for now
    315     CV_Assert(distorted.type() == CV_32FC2 || distorted.type() == CV_64FC2);
    316     undistorted.create(distorted.size(), distorted.type());
    317 
    318     CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3));
    319     CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3);
    320     CV_Assert(D.total() == 4 && K.size() == Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F));
    321 
    322     cv::Vec2d f, c;
    323     if (K.depth() == CV_32F)
    324     {
    325         Matx33f camMat = K.getMat();
    326         f = Vec2f(camMat(0, 0), camMat(1, 1));
    327         c = Vec2f(camMat(0, 2), camMat(1, 2));
    328     }
    329     else
    330     {
    331         Matx33d camMat = K.getMat();
    332         f = Vec2d(camMat(0, 0), camMat(1, 1));
    333         c = Vec2d(camMat(0, 2), camMat(1, 2));
    334     }
    335 
    336     Vec4d k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>();
    337 
    338     cv::Matx33d RR = cv::Matx33d::eye();
    339     if (!R.empty() && R.total() * R.channels() == 3)
    340     {
    341         cv::Vec3d rvec;
    342         R.getMat().convertTo(rvec, CV_64F);
    343         RR = cv::Affine3d(rvec).rotation();
    344     }
    345     else if (!R.empty() && R.size() == Size(3, 3))
    346         R.getMat().convertTo(RR, CV_64F);
    347 
    348     if(!P.empty())
    349     {
    350         cv::Matx33d PP;
    351         P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
    352         RR = PP * RR;
    353     }
    354 
    355     // start undistorting
    356     const cv::Vec2f* srcf = distorted.getMat().ptr<cv::Vec2f>();
    357     const cv::Vec2d* srcd = distorted.getMat().ptr<cv::Vec2d>();
    358     cv::Vec2f* dstf = undistorted.getMat().ptr<cv::Vec2f>();
    359     cv::Vec2d* dstd = undistorted.getMat().ptr<cv::Vec2d>();
    360 
    361     size_t n = distorted.total();
    362     int sdepth = distorted.depth();
    363 
    364     for(size_t i = 0; i < n; i++ )
    365     {
    366         Vec2d pi = sdepth == CV_32F ? (Vec2d)srcf[i] : srcd[i];  // image point
    367         Vec2d pw((pi[0] - c[0])/f[0], (pi[1] - c[1])/f[1]);      // world point
    368 
    369         double scale = 1.0;
    370 
    371         double theta_d = sqrt(pw[0]*pw[0] + pw[1]*pw[1]);
    372         if (theta_d > 1e-8)
    373         {
    374             // compensate distortion iteratively
    375             double theta = theta_d;
    376             for(int j = 0; j < 10; j++ )
    377             {
    378                 double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta6*theta2;
    379                 theta = theta_d / (1 + k[0] * theta2 + k[1] * theta4 + k[2] * theta6 + k[3] * theta8);
    380             }
    381 
    382             scale = std::tan(theta) / theta_d;
    383         }
    384 
    385         Vec2d pu = pw * scale; //undistorted point
    386 
    387         // reproject
    388         Vec3d pr = RR * Vec3d(pu[0], pu[1], 1.0); // rotated point optionally multiplied by new camera matrix
    389         Vec2d fi(pr[0]/pr[2], pr[1]/pr[2]);       // final
    390 
    391         if( sdepth == CV_32F )
    392             dstf[i] = fi;
    393         else
    394             dstd[i] = fi;
    395     }
    396 }
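
        // Usage sketch (editor's illustration): undistortPoints maps distorted pixel
        // coordinates back to normalized coordinates; passing a new camera matrix as P
        // (here simply K again) re-projects the result to pixels. K and D are placeholders.
        //
        //     std::vector<cv::Point2d> pix = { {330.0, 250.0} };
        //     std::vector<cv::Point2d> norm, repix;
        //     cv::fisheye::undistortPoints(pix, norm, K, D);                    // normalized output
        //     cv::fisheye::undistortPoints(pix, repix, K, D, cv::noArray(), K); // pixel output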
    397 
    398 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    399 /// cv::fisheye::initUndistortRectifyMap
    400 
    401 void cv::fisheye::initUndistortRectifyMap( InputArray K, InputArray D, InputArray R, InputArray P,
    402     const cv::Size& size, int m1type, OutputArray map1, OutputArray map2 )
    403 {
    404     CV_Assert( m1type == CV_16SC2 || m1type == CV_32F || m1type <=0 );
    405     map1.create( size, m1type <= 0 ? CV_16SC2 : m1type );
    406     map2.create( size, map1.type() == CV_16SC2 ? CV_16UC1 : CV_32F );
    407 
    408     CV_Assert((K.depth() == CV_32F || K.depth() == CV_64F) && (D.depth() == CV_32F || D.depth() == CV_64F));
    409     CV_Assert((P.depth() == CV_32F || P.depth() == CV_64F) && (R.depth() == CV_32F || R.depth() == CV_64F));
    410     CV_Assert(K.size() == Size(3, 3) && (D.empty() || D.total() == 4));
    411     CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3);
    412     CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3));
    413 
    414     cv::Vec2d f, c;
    415     if (K.depth() == CV_32F)
    416     {
    417         Matx33f camMat = K.getMat();
    418         f = Vec2f(camMat(0, 0), camMat(1, 1));
    419         c = Vec2f(camMat(0, 2), camMat(1, 2));
    420     }
    421     else
    422     {
    423         Matx33d camMat = K.getMat();
    424         f = Vec2d(camMat(0, 0), camMat(1, 1));
    425         c = Vec2d(camMat(0, 2), camMat(1, 2));
    426     }
    427 
    428     Vec4d k = Vec4d::all(0);
    429     if (!D.empty())
    430         k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>();
    431 
    432     cv::Matx33d RR  = cv::Matx33d::eye();
    433     if (!R.empty() && R.total() * R.channels() == 3)
    434     {
    435         cv::Vec3d rvec;
    436         R.getMat().convertTo(rvec, CV_64F);
    437         RR = Affine3d(rvec).rotation();
    438     }
    439     else if (!R.empty() && R.size() == Size(3, 3))
    440         R.getMat().convertTo(RR, CV_64F);
    441 
    442     cv::Matx33d PP = cv::Matx33d::eye();
    443     if (!P.empty())
    444         P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
    445 
    446     cv::Matx33d iR = (PP * RR).inv(cv::DECOMP_SVD);
    447 
    448     for( int i = 0; i < size.height; ++i)
    449     {
    450         float* m1f = map1.getMat().ptr<float>(i);
    451         float* m2f = map2.getMat().ptr<float>(i);
    452         short*  m1 = (short*)m1f;
    453         ushort* m2 = (ushort*)m2f;
    454 
    455         double _x = i*iR(0, 1) + iR(0, 2),
    456                _y = i*iR(1, 1) + iR(1, 2),
    457                _w = i*iR(2, 1) + iR(2, 2);
    458 
    459         for( int j = 0; j < size.width; ++j)
    460         {
    461             double x = _x/_w, y = _y/_w;
    462 
    463             double r = sqrt(x*x + y*y);
    464             double theta = atan(r);
    465 
    466             double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta4*theta4;
    467             double theta_d = theta * (1 + k[0]*theta2 + k[1]*theta4 + k[2]*theta6 + k[3]*theta8);
    468 
    469             double scale = (r == 0) ? 1.0 : theta_d / r;
    470             double u = f[0]*x*scale + c[0];
    471             double v = f[1]*y*scale + c[1];
    472 
    473             if( m1type == CV_16SC2 )
    474             {
    475                 int iu = cv::saturate_cast<int>(u*cv::INTER_TAB_SIZE);
    476                 int iv = cv::saturate_cast<int>(v*cv::INTER_TAB_SIZE);
    477                 m1[j*2+0] = (short)(iu >> cv::INTER_BITS);
    478                 m1[j*2+1] = (short)(iv >> cv::INTER_BITS);
    479                 m2[j] = (ushort)((iv & (cv::INTER_TAB_SIZE-1))*cv::INTER_TAB_SIZE + (iu & (cv::INTER_TAB_SIZE-1)));
    480             }
    481             else if( m1type == CV_32FC1 )
    482             {
    483                 m1f[j] = (float)u;
    484                 m2f[j] = (float)v;
    485             }
    486 
    487             _x += iR(0, 0);
    488             _y += iR(1, 0);
    489             _w += iR(2, 0);
    490         }
    491     }
    492 }
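
        // Usage sketch (editor's illustration): the maps depend only on the calibration,
        // so they are typically built once and reused with cv::remap for every frame
        // (cv::remap lives in <opencv2/imgproc.hpp>). Names and sizes are placeholders.
        //
        //     cv::Mat map1, map2, rectified;
        //     cv::fisheye::initUndistortRectifyMap(K, D, cv::Matx33d::eye(), K,
        //                                          cv::Size(640, 480), CV_16SC2, map1, map2);
        //     cv::remap(frame, rectified, map1, map2, cv::INTER_LINEAR); // repeat per frame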
    493 
    494 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    495 /// cv::fisheye::undistortImage
    496 
    497 void cv::fisheye::undistortImage(InputArray distorted, OutputArray undistorted,
    498         InputArray K, InputArray D, InputArray Knew, const Size& new_size)
    499 {
    500     Size size = new_size.area() != 0 ? new_size : distorted.size();
    501 
    502     cv::Mat map1, map2;
    503     fisheye::initUndistortRectifyMap(K, D, cv::Matx33d::eye(), Knew, size, CV_16SC2, map1, map2 );
    504     cv::remap(distorted, undistorted, map1, map2, INTER_LINEAR, BORDER_CONSTANT);
    505 }
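
        // Usage sketch (editor's illustration): one-shot convenience wrapper around
        // initUndistortRectifyMap + remap. Passing K unchanged as Knew keeps the original
        // focal length, which usually crops away part of the fisheye field of view;
        // see estimateNewCameraMatrixForUndistortRectify below for choosing a better Knew.
        //
        //     cv::Mat undist;
        //     cv::fisheye::undistortImage(img, undist, K, D, K);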
    506 
    507 
    508 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    509 /// cv::fisheye::estimateNewCameraMatrixForUndistortRectify
    510 
    511 void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
    512     OutputArray P, double balance, const Size& new_size, double fov_scale)
    513 {
    514     CV_Assert( K.size() == Size(3, 3)       && (K.depth() == CV_32F || K.depth() == CV_64F));
    515     CV_Assert((D.empty() || D.total() == 4) && (D.depth() == CV_32F || D.depth() == CV_64F || D.empty()));
    516 
    517     int w = image_size.width, h = image_size.height;
    518     balance = std::min(std::max(balance, 0.0), 1.0);
    519 
    520     cv::Mat points(1, 4, CV_64FC2);
    521     Vec2d* pptr = points.ptr<Vec2d>();
    522     pptr[0] = Vec2d(w/2, 0);
    523     pptr[1] = Vec2d(w, h/2);
    524     pptr[2] = Vec2d(w/2, h);
    525     pptr[3] = Vec2d(0, h/2);
    526 
    527 #if 0
    528     const int N = 10;
    529     cv::Mat points(1, N * 4, CV_64FC2);
    530     Vec2d* pptr = points.ptr<Vec2d>();
    531     for(int i = 0, k = 0; i < 10; ++i)
    532     {
    533         pptr[k++] = Vec2d(w/2,   0) - Vec2d(w/8,   0) + Vec2d(w/4/N*i,   0);
    534         pptr[k++] = Vec2d(w/2, h-1) - Vec2d(w/8, h-1) + Vec2d(w/4/N*i, h-1);
    535 
    536         pptr[k++] = Vec2d(0,   h/2) - Vec2d(0,   h/8) + Vec2d(0,   h/4/N*i);
    537         pptr[k++] = Vec2d(w-1, h/2) - Vec2d(w-1, h/8) + Vec2d(w-1, h/4/N*i);
    538     }
    539 #endif
    540 
    541     fisheye::undistortPoints(points, points, K, D, R);
    542     cv::Scalar center_mass = mean(points);
    543     cv::Vec2d cn(center_mass.val);
    544 
    545     double aspect_ratio = (K.depth() == CV_32F) ? K.getMat().at<float >(0,0)/K.getMat().at<float> (1,1)
    546                                                 : K.getMat().at<double>(0,0)/K.getMat().at<double>(1,1);
    547 
    548     // convert to identity ratio
    549     cn[0] *= aspect_ratio;
    550     for(size_t i = 0; i < points.total(); ++i)
    551         pptr[i][1] *= aspect_ratio;
    552 
    553     double minx = DBL_MAX, miny = DBL_MAX, maxx = -DBL_MAX, maxy = -DBL_MAX;
    554     for(size_t i = 0; i < points.total(); ++i)
    555     {
    556         miny = std::min(miny, pptr[i][1]);
    557         maxy = std::max(maxy, pptr[i][1]);
    558         minx = std::min(minx, pptr[i][0]);
    559         maxx = std::max(maxx, pptr[i][0]);
    560     }
    561 
    562 #if 0
    563     double minx = -DBL_MAX, miny = -DBL_MAX, maxx = DBL_MAX, maxy = DBL_MAX;
    564     for(size_t i = 0; i < points.total(); ++i)
    565     {
    566         if (i % 4 == 0) miny = std::max(miny, pptr[i][1]);
    567         if (i % 4 == 1) maxy = std::min(maxy, pptr[i][1]);
    568         if (i % 4 == 2) minx = std::max(minx, pptr[i][0]);
    569         if (i % 4 == 3) maxx = std::min(maxx, pptr[i][0]);
    570     }
    571 #endif
    572 
    573     double f1 = w * 0.5/(cn[0] - minx);
    574     double f2 = w * 0.5/(maxx - cn[0]);
    575     double f3 = h * 0.5 * aspect_ratio/(cn[1] - miny);
    576     double f4 = h * 0.5 * aspect_ratio/(maxy - cn[1]);
    577 
    578     double fmin = std::min(f1, std::min(f2, std::min(f3, f4)));
    579     double fmax = std::max(f1, std::max(f2, std::max(f3, f4)));
    580 
    581     double f = balance * fmin + (1.0 - balance) * fmax;
    582     f *= fov_scale > 0 ? 1.0/fov_scale : 1.0;
    583 
    584     cv::Vec2d new_f(f, f), new_c = -cn * f + Vec2d(w, h * aspect_ratio) * 0.5;
    585 
    586     // restore aspect ratio
    587     new_f[1] /= aspect_ratio;
    588     new_c[1] /= aspect_ratio;
    589 
    590     if (new_size.area() > 0)
    591     {
    592         double rx = new_size.width /(double)image_size.width;
    593         double ry = new_size.height/(double)image_size.height;
    594 
    595         new_f[0] *= rx;  new_f[1] *= ry;
    596         new_c[0] *= rx;  new_c[1] *= ry;
    597     }
    598 
    599     Mat(Matx33d(new_f[0], 0, new_c[0],
    600                 0, new_f[1], new_c[1],
    601                 0,        0,       1)).convertTo(P, P.empty() ? K.type() : P.type());
    602 }
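
        // Usage sketch (editor's illustration): balance = 0 keeps only the region where
        // every pixel is valid, balance = 1 keeps the whole source image; the result is
        // intended to be passed as Knew/P to undistortImage or initUndistortRectifyMap.
        // img, K and D are placeholders.
        //
        //     cv::Mat Knew, undist;
        //     cv::fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, img.size(),
        //                                                             cv::Matx33d::eye(), Knew, 0.5);
        //     cv::fisheye::undistortImage(img, undist, K, D, Knew);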
    603 
    604 
    605 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    606 /// cv::fisheye::stereoRectify
    607 
    608 void cv::fisheye::stereoRectify( InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size& imageSize,
    609         InputArray _R, InputArray _tvec, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2,
    610         OutputArray Q, int flags, const Size& newImageSize, double balance, double fov_scale)
    611 {
    612     CV_Assert((_R.size() == Size(3, 3) || _R.total() * _R.channels() == 3) && (_R.depth() == CV_32F || _R.depth() == CV_64F));
    613     CV_Assert(_tvec.total() * _tvec.channels() == 3 && (_tvec.depth() == CV_32F || _tvec.depth() == CV_64F));
    614 
    615 
    616     cv::Mat aaa = _tvec.getMat().reshape(3, 1);
    617 
    618     Vec3d rvec; // Rodrigues vector
    619     if (_R.size() == Size(3, 3))
    620     {
    621         cv::Matx33d rmat;
    622         _R.getMat().convertTo(rmat, CV_64F);
    623         rvec = Affine3d(rmat).rvec();
    624     }
    625     else if (_R.total() * _R.channels() == 3)
    626         _R.getMat().convertTo(rvec, CV_64F);
    627 
    628     Vec3d tvec;
    629     _tvec.getMat().convertTo(tvec, CV_64F);
    630 
    631     // rectification algorithm
    632     rvec *= -0.5;              // get average rotation
    633 
    634     Matx33d r_r;
    635     Rodrigues(rvec, r_r);  // rotate cameras to same orientation by averaging
    636 
    637     Vec3d t = r_r * tvec;
    638     Vec3d uu(t[0] > 0 ? 1 : -1, 0, 0);
    639 
    640     // calculate global Z rotation
    641     Vec3d ww = t.cross(uu);
    642     double nw = norm(ww);
    643     if (nw > 0.0)
    644         ww *= acos(fabs(t[0])/cv::norm(t))/nw;
    645 
    646     Matx33d wr;
    647     Rodrigues(ww, wr);
    648 
    649     // apply to both views
    650     Matx33d ri1 = wr * r_r.t();
    651     Mat(ri1, false).convertTo(R1, R1.empty() ? CV_64F : R1.type());
    652     Matx33d ri2 = wr * r_r;
    653     Mat(ri2, false).convertTo(R2, R2.empty() ? CV_64F : R2.type());
    654     Vec3d tnew = ri2 * tvec;
    655 
    656     // calculate projection/camera matrices. These contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
    657     Matx33d newK1, newK2;
    658     estimateNewCameraMatrixForUndistortRectify(K1, D1, imageSize, R1, newK1, balance, newImageSize, fov_scale);
    659     estimateNewCameraMatrixForUndistortRectify(K2, D2, imageSize, R2, newK2, balance, newImageSize, fov_scale);
    660 
    661     double fc_new = std::min(newK1(1,1), newK2(1,1));
    662     Point2d cc_new[2] = { Vec2d(newK1(0, 2), newK1(1, 2)), Vec2d(newK2(0, 2), newK2(1, 2)) };
    663 
    664     // The vertical focal length must be the same for both images to keep the epipolar constraint; use fy for fx as well.
    665     // For simplicity, set the principal points for both cameras to be the average
    666     // of the two principal points (either one or both of the x- and y-coordinates)
    667     if( flags & cv::CALIB_ZERO_DISPARITY )
    668         cc_new[0] = cc_new[1] = (cc_new[0] + cc_new[1]) * 0.5;
    669     else
    670         cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
    671 
    672     Mat(Matx34d(fc_new, 0, cc_new[0].x, 0,
    673                 0, fc_new, cc_new[0].y, 0,
    674                 0,      0,           1, 0), false).convertTo(P1, P1.empty() ? CV_64F : P1.type());
    675 
    676     Mat(Matx34d(fc_new, 0, cc_new[1].x, tnew[0]*fc_new, // baseline * focal length
    677                 0, fc_new, cc_new[1].y,              0,
    678                 0,      0,           1,              0), false).convertTo(P2, P2.empty() ? CV_64F : P2.type());
    679 
    680     if (Q.needed())
    681         Mat(Matx44d(1, 0, 0,           -cc_new[0].x,
    682                     0, 1, 0,           -cc_new[0].y,
    683                     0, 0, 0,            fc_new,
    684                     0, 0, -1./tnew[0], (cc_new[0].x - cc_new[1].x)/tnew[0]), false).convertTo(Q, Q.empty() ? CV_64F : Q.depth());
    685 }
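
        // Usage sketch (editor's illustration): rectifying a calibrated fisheye stereo
        // pair. K1/D1/K2/D2, R and T are assumed to come from fisheye::stereoCalibrate,
        // imgSize is the calibration image size; the outputs feed one rectification map
        // per camera.
        //
        //     cv::Mat R1, R2, P1, P2, Q;
        //     cv::fisheye::stereoRectify(K1, D1, K2, D2, imgSize, R, T,
        //                                R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY);
        //     cv::Mat m1l, m2l, m1r, m2r;
        //     cv::fisheye::initUndistortRectifyMap(K1, D1, R1, P1, imgSize, CV_16SC2, m1l, m2l);
        //     cv::fisheye::initUndistortRectifyMap(K2, D2, R2, P2, imgSize, CV_16SC2, m1r, m2r);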
    686 
    687 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    688 /// cv::fisheye::calibrate
    689 
    690 double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
    691                                     InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
    692                                     int flags , cv::TermCriteria criteria)
    693 {
    694     CV_Assert(!objectPoints.empty() && !imagePoints.empty() && objectPoints.total() == imagePoints.total());
    695     CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
    696     CV_Assert(imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2);
    697     CV_Assert((!K.empty() && K.size() == Size(3,3)) || K.empty());
    698     CV_Assert((!D.empty() && D.total() == 4) || D.empty());
    699     CV_Assert((!rvecs.empty() && rvecs.channels() == 3) || rvecs.empty());
    700     CV_Assert((!tvecs.empty() && tvecs.channels() == 3) || tvecs.empty());
    701 
    702     CV_Assert(((flags & CALIB_USE_INTRINSIC_GUESS) && !K.empty() && !D.empty()) || !(flags & CALIB_USE_INTRINSIC_GUESS));
    703 
    704     using namespace cv::internal;
    705     //-------------------------------Initialization
    706     IntrinsicParams finalParam;
    707     IntrinsicParams currentParam;
    708     IntrinsicParams errors;
    709 
    710     finalParam.isEstimate[0] = 1;
    711     finalParam.isEstimate[1] = 1;
    712     finalParam.isEstimate[2] = 1;
    713     finalParam.isEstimate[3] = 1;
    714     finalParam.isEstimate[4] = flags & CALIB_FIX_SKEW ? 0 : 1;
    715     finalParam.isEstimate[5] = flags & CALIB_FIX_K1 ? 0 : 1;
    716     finalParam.isEstimate[6] = flags & CALIB_FIX_K2 ? 0 : 1;
    717     finalParam.isEstimate[7] = flags & CALIB_FIX_K3 ? 0 : 1;
    718     finalParam.isEstimate[8] = flags & CALIB_FIX_K4 ? 0 : 1;
    719 
    720     const int recompute_extrinsic = flags & CALIB_RECOMPUTE_EXTRINSIC ? 1: 0;
    721     const int check_cond = flags & CALIB_CHECK_COND ? 1 : 0;
    722 
    723     const double alpha_smooth = 0.4;
    724     const double thresh_cond = 1e6;
    725     double change = 1;
    726     Vec2d err_std;
    727 
    728     Matx33d _K;
    729     Vec4d _D;
    730     if (flags & CALIB_USE_INTRINSIC_GUESS)
    731     {
    732         K.getMat().convertTo(_K, CV_64FC1);
    733         D.getMat().convertTo(_D, CV_64FC1);
    734         finalParam.Init(Vec2d(_K(0,0), _K(1, 1)),
    735                         Vec2d(_K(0,2), _K(1, 2)),
    736                         Vec4d(flags & CALIB_FIX_K1 ? 0 : _D[0],
    737                               flags & CALIB_FIX_K2 ? 0 : _D[1],
    738                               flags & CALIB_FIX_K3 ? 0 : _D[2],
    739                               flags & CALIB_FIX_K4 ? 0 : _D[3]),
    740                         _K(0, 1) / _K(0, 0));
    741     }
    742     else
    743     {
    744         finalParam.Init(Vec2d(max(image_size.width, image_size.height) / CV_PI, max(image_size.width, image_size.height) / CV_PI),
    745                         Vec2d(image_size.width  / 2.0 - 0.5, image_size.height / 2.0 - 0.5));
    746     }
    747 
    748     errors.isEstimate = finalParam.isEstimate;
    749 
    750     std::vector<Vec3d> omc(objectPoints.total()), Tc(objectPoints.total());
    751 
    752     CalibrateExtrinsics(objectPoints, imagePoints, finalParam, check_cond, thresh_cond, omc, Tc);
    753 
    754 
    755     //-------------------------------Optimization
    756     for(int iter = 0; ; ++iter)
    757     {
    758         if ((criteria.type == 1 && iter >= criteria.maxCount)  ||
    759             (criteria.type == 2 && change <= criteria.epsilon) ||
    760             (criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount)))
    761             break;
    762 
    763         double alpha_smooth2 = 1 - std::pow(1 - alpha_smooth, iter + 1.0);
    764 
    765         Mat JJ2_inv, ex3;
    766         ComputeJacobians(objectPoints, imagePoints, finalParam, omc, Tc, check_cond,thresh_cond, JJ2_inv, ex3);
    767 
    768         Mat G =  alpha_smooth2 * JJ2_inv * ex3;
    769 
    770         currentParam = finalParam + G;
    771 
    772         change = norm(Vec4d(currentParam.f[0], currentParam.f[1], currentParam.c[0], currentParam.c[1]) -
    773                 Vec4d(finalParam.f[0], finalParam.f[1], finalParam.c[0], finalParam.c[1]))
    774                 / norm(Vec4d(currentParam.f[0], currentParam.f[1], currentParam.c[0], currentParam.c[1]));
    775 
    776         finalParam = currentParam;
    777 
    778         if (recompute_extrinsic)
    779         {
    780             CalibrateExtrinsics(objectPoints,  imagePoints, finalParam, check_cond,
    781                                     thresh_cond, omc, Tc);
    782         }
    783     }
    784 
    785     //-------------------------------Validation
    786     double rms;
    787     EstimateUncertainties(objectPoints, imagePoints, finalParam,  omc, Tc, errors, err_std, thresh_cond,
    788                               check_cond, rms);
    789 
    790     //-------------------------------
    791     _K = Matx33d(finalParam.f[0], finalParam.f[0] * finalParam.alpha, finalParam.c[0],
    792             0,                    finalParam.f[1], finalParam.c[1],
    793             0,                                  0,               1);
    794 
    795     if (K.needed()) cv::Mat(_K).convertTo(K, K.empty() ? CV_64FC1 : K.type());
    796     if (D.needed()) cv::Mat(finalParam.k).convertTo(D, D.empty() ? CV_64FC1 : D.type());
    797     if (rvecs.kind()==_InputArray::STD_VECTOR_MAT)
    798     {
    799         int i;
    800         for( i = 0; i < (int)objectPoints.total(); i++ )
    801         {
    802             rvecs.getMat(i)=omc[i];
    803             tvecs.getMat(i)=Tc[i];
    804         }
    805     }
    806     else
    807     {
    808         if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type());
    809         if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type());
    810     }
    811 
    812     return rms;
    813 }
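
        // Usage sketch (editor's illustration): monocular fisheye calibration. objectPts
        // and imagePts are assumed to hold per-view board corners (e.g. collected with
        // findChessboardCorners); all names and flag choices are examples only.
        //
        //     std::vector<std::vector<cv::Point3f> > objectPts; // filled per view
        //     std::vector<std::vector<cv::Point2f> > imagePts;  // filled per view
        //     cv::Matx33d K;  cv::Vec4d D;
        //     std::vector<cv::Vec3d> rvecs, tvecs;
        //     int flags = cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW;
        //     double rms = cv::fisheye::calibrate(objectPts, imagePts, imgSize, K, D, rvecs, tvecs,
        //                                         flags, cv::TermCriteria(3, 100, 1e-6));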
    814 
    815 //////////////////////////////////////////////////////////////////////////////////////////////////////////////
    816 /// cv::fisheye::stereoCalibrate
    817 
    818 double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
    819                                     InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
    820                                     OutputArray R, OutputArray T, int flags, TermCriteria criteria)
    821 {
    822     CV_Assert(!objectPoints.empty() && !imagePoints1.empty() && !imagePoints2.empty());
    823     CV_Assert(objectPoints.total() == imagePoints1.total() && imagePoints1.total() == imagePoints2.total());
    824     CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
    825     CV_Assert(imagePoints1.type() == CV_32FC2 || imagePoints1.type() == CV_64FC2);
    826     CV_Assert(imagePoints2.type() == CV_32FC2 || imagePoints2.type() == CV_64FC2);
    827 
    828     CV_Assert((!K1.empty() && K1.size() == Size(3,3)) || K1.empty());
    829     CV_Assert((!D1.empty() && D1.total() == 4) || D1.empty());
    830     CV_Assert((!K2.empty() && K2.size() == Size(3,3)) || K2.empty());
    831     CV_Assert((!D2.empty() && D2.total() == 4) || D2.empty());
    832 
    833     CV_Assert(((flags & CALIB_FIX_INTRINSIC) && !K1.empty() && !K2.empty() && !D1.empty() && !D2.empty()) || !(flags & CALIB_FIX_INTRINSIC));
    834 
    835     //-------------------------------Initialization
    836 
    837     const int threshold = 50;
    838     const double thresh_cond = 1e6;
    839     const int check_cond = 1;
    840 
    841     int n_points = (int)objectPoints.getMat(0).total();
    842     int n_images = (int)objectPoints.total();
    843 
    844     double change = 1;
    845 
    846     cv::internal::IntrinsicParams intrinsicLeft;
    847     cv::internal::IntrinsicParams intrinsicRight;
    848 
    849     cv::internal::IntrinsicParams intrinsicLeft_errors;
    850     cv::internal::IntrinsicParams intrinsicRight_errors;
    851 
    852     Matx33d _K1, _K2;
    853     Vec4d _D1, _D2;
    854     if (!K1.empty()) K1.getMat().convertTo(_K1, CV_64FC1);
    855     if (!D1.empty()) D1.getMat().convertTo(_D1, CV_64FC1);
    856     if (!K2.empty()) K2.getMat().convertTo(_K2, CV_64FC1);
    857     if (!D2.empty()) D2.getMat().convertTo(_D2, CV_64FC1);
    858 
    859     std::vector<Vec3d> rvecs1(n_images), tvecs1(n_images), rvecs2(n_images), tvecs2(n_images);
    860 
    861     if (!(flags & CALIB_FIX_INTRINSIC))
    862     {
    863         calibrate(objectPoints, imagePoints1, imageSize, _K1, _D1, rvecs1, tvecs1, flags, TermCriteria(3, 20, 1e-6));
    864         calibrate(objectPoints, imagePoints2, imageSize, _K2, _D2, rvecs2, tvecs2, flags, TermCriteria(3, 20, 1e-6));
    865     }
    866 
    867     intrinsicLeft.Init(Vec2d(_K1(0,0), _K1(1, 1)), Vec2d(_K1(0,2), _K1(1, 2)),
    868                        Vec4d(_D1[0], _D1[1], _D1[2], _D1[3]), _K1(0, 1) / _K1(0, 0));
    869 
    870     intrinsicRight.Init(Vec2d(_K2(0,0), _K2(1, 1)), Vec2d(_K2(0,2), _K2(1, 2)),
    871                         Vec4d(_D2[0], _D2[1], _D2[2], _D2[3]), _K2(0, 1) / _K2(0, 0));
    872 
    873     if ((flags & CALIB_FIX_INTRINSIC))
    874     {
    875         cv::internal::CalibrateExtrinsics(objectPoints,  imagePoints1, intrinsicLeft, check_cond, thresh_cond, rvecs1, tvecs1);
    876         cv::internal::CalibrateExtrinsics(objectPoints,  imagePoints2, intrinsicRight, check_cond, thresh_cond, rvecs2, tvecs2);
    877     }
    878 
    879     intrinsicLeft.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    880     intrinsicLeft.isEstimate[1] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    881     intrinsicLeft.isEstimate[2] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    882     intrinsicLeft.isEstimate[3] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    883     intrinsicLeft.isEstimate[4] = flags & (CALIB_FIX_SKEW | CALIB_FIX_INTRINSIC) ? 0 : 1;
    884     intrinsicLeft.isEstimate[5] = flags & (CALIB_FIX_K1 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    885     intrinsicLeft.isEstimate[6] = flags & (CALIB_FIX_K2 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    886     intrinsicLeft.isEstimate[7] = flags & (CALIB_FIX_K3 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    887     intrinsicLeft.isEstimate[8] = flags & (CALIB_FIX_K4 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    888 
    889     intrinsicRight.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    890     intrinsicRight.isEstimate[1] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    891     intrinsicRight.isEstimate[2] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    892     intrinsicRight.isEstimate[3] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    893     intrinsicRight.isEstimate[4] = flags & (CALIB_FIX_SKEW | CALIB_FIX_INTRINSIC) ? 0 : 1;
    894     intrinsicRight.isEstimate[5] = flags & (CALIB_FIX_K1 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    895     intrinsicRight.isEstimate[6] = flags & (CALIB_FIX_K2 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    896     intrinsicRight.isEstimate[7] = flags & (CALIB_FIX_K3 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    897     intrinsicRight.isEstimate[8] = flags & (CALIB_FIX_K4 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    898 
    899     intrinsicLeft_errors.isEstimate = intrinsicLeft.isEstimate;
    900     intrinsicRight_errors.isEstimate = intrinsicRight.isEstimate;
    901 
    902     std::vector<int> selectedParams;
    903     std::vector<int> tmp(6 * (n_images + 1), 1);
    904     selectedParams.insert(selectedParams.end(), intrinsicLeft.isEstimate.begin(), intrinsicLeft.isEstimate.end());
    905     selectedParams.insert(selectedParams.end(), intrinsicRight.isEstimate.begin(), intrinsicRight.isEstimate.end());
    906     selectedParams.insert(selectedParams.end(), tmp.begin(), tmp.end());
    907 
    908     //Init values for rotation and translation between two views
    909     cv::Mat om_list(1, n_images, CV_64FC3), T_list(1, n_images, CV_64FC3);
    910     cv::Mat om_ref, R_ref, T_ref, R1, R2;
    911     for (int image_idx = 0; image_idx < n_images; ++image_idx)
    912     {
    913         cv::Rodrigues(rvecs1[image_idx], R1);
    914         cv::Rodrigues(rvecs2[image_idx], R2);
    915         R_ref = R2 * R1.t();
    916         T_ref = cv::Mat(tvecs2[image_idx]) - R_ref * cv::Mat(tvecs1[image_idx]);
    917         cv::Rodrigues(R_ref, om_ref);
    918         om_ref.reshape(3, 1).copyTo(om_list.col(image_idx));
    919         T_ref.reshape(3, 1).copyTo(T_list.col(image_idx));
    920     }
    921     cv::Vec3d omcur = cv::internal::median3d(om_list);
    922     cv::Vec3d Tcur  = cv::internal::median3d(T_list);
    923 
    924     cv::Mat J = cv::Mat::zeros(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1),
    925             e = cv::Mat::zeros(4 * n_points * n_images, 1, CV_64FC1), Jkk, ekk;
    926     cv::Mat J2_inv;
    927 
    928     for(int iter = 0; ; ++iter)
    929     {
    930         if ((criteria.type == 1 && iter >= criteria.maxCount)  ||
    931             (criteria.type == 2 && change <= criteria.epsilon) ||
    932             (criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount)))
    933             break;
    934 
    935         J.create(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1);
    936         e.create(4 * n_points * n_images, 1, CV_64FC1);
    937         Jkk.create(4 * n_points, 18 + 6 * (n_images + 1), CV_64FC1);
    938         ekk.create(4 * n_points, 1, CV_64FC1);
    939 
    940         cv::Mat omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT;
    941 
    942         for (int image_idx = 0; image_idx < n_images; ++image_idx)
    943         {
    944             Jkk = cv::Mat::zeros(4 * n_points, 18 + 6 * (n_images + 1), CV_64FC1);
    945 
    946             cv::Mat object  = objectPoints.getMat(image_idx).clone();
    947             cv::Mat imageLeft  = imagePoints1.getMat(image_idx).clone();
    948             cv::Mat imageRight  = imagePoints2.getMat(image_idx).clone();
    949             cv::Mat jacobians, projected;
    950 
    951             //left camera jacobian
    952             cv::Mat rvec = cv::Mat(rvecs1[image_idx]);
    953             cv::Mat tvec  = cv::Mat(tvecs1[image_idx]);
    954             cv::internal::projectPoints(object, projected, rvec, tvec, intrinsicLeft, jacobians);
    955             cv::Mat(cv::Mat((imageLeft - projected).t()).reshape(1, 1).t()).copyTo(ekk.rowRange(0, 2 * n_points));
    956             jacobians.colRange(8, 11).copyTo(Jkk.colRange(24 + image_idx * 6, 27 + image_idx * 6).rowRange(0, 2 * n_points));
    957             jacobians.colRange(11, 14).copyTo(Jkk.colRange(27 + image_idx * 6, 30 + image_idx * 6).rowRange(0, 2 * n_points));
    958             jacobians.colRange(0, 2).copyTo(Jkk.colRange(0, 2).rowRange(0, 2 * n_points));
    959             jacobians.colRange(2, 4).copyTo(Jkk.colRange(2, 4).rowRange(0, 2 * n_points));
    960             jacobians.colRange(4, 8).copyTo(Jkk.colRange(5, 9).rowRange(0, 2 * n_points));
    961             jacobians.col(14).copyTo(Jkk.col(4).rowRange(0, 2 * n_points));
    962 
    963             //right camera jacobian
    964             cv::internal::compose_motion(rvec, tvec, omcur, Tcur, omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT);
    965             rvec = cv::Mat(rvecs2[image_idx]);
    966             tvec  = cv::Mat(tvecs2[image_idx]);
    967 
    968             cv::internal::projectPoints(object, projected, omr, Tr, intrinsicRight, jacobians);
    969             cv::Mat(cv::Mat((imageRight - projected).t()).reshape(1, 1).t()).copyTo(ekk.rowRange(2 * n_points, 4 * n_points));
    970             cv::Mat dxrdom = jacobians.colRange(8, 11) * domrdom + jacobians.colRange(11, 14) * dTrdom;
    971             cv::Mat dxrdT = jacobians.colRange(8, 11) * domrdT + jacobians.colRange(11, 14)* dTrdT;
    972             cv::Mat dxrdomckk = jacobians.colRange(8, 11) * domrdomckk + jacobians.colRange(11, 14) * dTrdomckk;
    973             cv::Mat dxrdTckk = jacobians.colRange(8, 11) * domrdTckk + jacobians.colRange(11, 14) * dTrdTckk;
    974 
    975             dxrdom.copyTo(Jkk.colRange(18, 21).rowRange(2 * n_points, 4 * n_points));
    976             dxrdT.copyTo(Jkk.colRange(21, 24).rowRange(2 * n_points, 4 * n_points));
    977             dxrdomckk.copyTo(Jkk.colRange(24 + image_idx * 6, 27 + image_idx * 6).rowRange(2 * n_points, 4 * n_points));
    978             dxrdTckk.copyTo(Jkk.colRange(27 + image_idx * 6, 30 + image_idx * 6).rowRange(2 * n_points, 4 * n_points));
    979             jacobians.colRange(0, 2).copyTo(Jkk.colRange(9 + 0, 9 + 2).rowRange(2 * n_points, 4 * n_points));
    980             jacobians.colRange(2, 4).copyTo(Jkk.colRange(9 + 2, 9 + 4).rowRange(2 * n_points, 4 * n_points));
    981             jacobians.colRange(4, 8).copyTo(Jkk.colRange(9 + 5, 9 + 9).rowRange(2 * n_points, 4 * n_points));
    982             jacobians.col(14).copyTo(Jkk.col(9 + 4).rowRange(2 * n_points, 4 * n_points));
    983 
    984             //check goodness of the stereo pair
    985             double abs_max  = 0;
    986             for (int i = 0; i < 4 * n_points; i++)
    987             {
    988                 if (fabs(ekk.at<double>(i)) > abs_max)
    989                 {
    990                     abs_max = fabs(ekk.at<double>(i));
    991                 }
    992             }
    993 
    994             CV_Assert(abs_max < threshold); // bad stereo pair
    995 
    996             Jkk.copyTo(J.rowRange(image_idx * 4 * n_points, (image_idx + 1) * 4 * n_points));
    997             ekk.copyTo(e.rowRange(image_idx * 4 * n_points, (image_idx + 1) * 4 * n_points));
    998         }
    999 
   1000         cv::Vec6d oldTom(Tcur[0], Tcur[1], Tcur[2], omcur[0], omcur[1], omcur[2]);
   1001 
   1002         //update all parameters
   1003         cv::subMatrix(J, J, selectedParams, std::vector<int>(J.rows, 1));
   1004         cv::Mat J2 = J.t() * J;
   1005         J2_inv = J2.inv();
   1006         int a = cv::countNonZero(intrinsicLeft.isEstimate);
   1007         int b = cv::countNonZero(intrinsicRight.isEstimate);
   1008         cv::Mat deltas = J2_inv * J.t() * e;
   1009         intrinsicLeft = intrinsicLeft + deltas.rowRange(0, a);
   1010         intrinsicRight = intrinsicRight + deltas.rowRange(a, a + b);
   1011         omcur = omcur + cv::Vec3d(deltas.rowRange(a + b, a + b + 3));
   1012         Tcur = Tcur + cv::Vec3d(deltas.rowRange(a + b + 3, a + b + 6));
   1013         for (int image_idx = 0; image_idx < n_images; ++image_idx)
   1014         {
   1015             rvecs1[image_idx] = cv::Mat(cv::Mat(rvecs1[image_idx]) + deltas.rowRange(a + b + 6 + image_idx * 6, a + b + 9 + image_idx * 6));
   1016             tvecs1[image_idx] = cv::Mat(cv::Mat(tvecs1[image_idx]) + deltas.rowRange(a + b + 9 + image_idx * 6, a + b + 12 + image_idx * 6));
   1017         }
   1018 
   1019         cv::Vec6d newTom(Tcur[0], Tcur[1], Tcur[2], omcur[0], omcur[1], omcur[2]);
   1020         change = cv::norm(newTom - oldTom) / cv::norm(newTom);
   1021     }
   1022 
   1023     double rms = 0;
   1024     const Vec2d* ptr_e = e.ptr<Vec2d>();
   1025     for (size_t i = 0; i < e.total() / 2; i++)
   1026     {
   1027         rms += ptr_e[i][0] * ptr_e[i][0] + ptr_e[i][1] * ptr_e[i][1];
   1028     }
   1029 
   1030     rms /= ((double)e.total() / 2.0);
   1031     rms = sqrt(rms);
   1032 
   1033     _K1 = Matx33d(intrinsicLeft.f[0], intrinsicLeft.f[0] * intrinsicLeft.alpha, intrinsicLeft.c[0],
   1034                                        0,                       intrinsicLeft.f[1], intrinsicLeft.c[1],
   1035                                        0,                                        0,                 1);
   1036 
   1037     _K2 = Matx33d(intrinsicRight.f[0], intrinsicRight.f[0] * intrinsicRight.alpha, intrinsicRight.c[0],
   1038                                         0,                        intrinsicRight.f[1], intrinsicRight.c[1],
   1039                                         0,                                          0,                  1);
   1040 
   1041     Mat _R;
   1042     Rodrigues(omcur, _R);
   1043 
   1044     if (K1.needed()) cv::Mat(_K1).convertTo(K1, K1.empty() ? CV_64FC1 : K1.type());
   1045     if (K2.needed()) cv::Mat(_K2).convertTo(K2, K2.empty() ? CV_64FC1 : K2.type());
   1046     if (D1.needed()) cv::Mat(intrinsicLeft.k).convertTo(D1, D1.empty() ? CV_64FC1 : D1.type());
   1047     if (D2.needed()) cv::Mat(intrinsicRight.k).convertTo(D2, D2.empty() ? CV_64FC1 : D2.type());
   1048     if (R.needed()) _R.convertTo(R, R.empty() ? CV_64FC1 : R.type());
   1049     if (T.needed()) cv::Mat(Tcur).convertTo(T, T.empty() ? CV_64FC1 : T.type());
   1050 
   1051     return rms;
   1052 }
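
        // Usage sketch (editor's illustration): stereo fisheye calibration with fixed,
        // pre-computed intrinsics. Container and matrix names are assumptions; the
        // per-view corner lists must correspond between the two cameras.
        //
        //     cv::Matx33d K1, K2;  cv::Vec4d D1, D2;   // e.g. from fisheye::calibrate
        //     cv::Mat R, T;
        //     double rms = cv::fisheye::stereoCalibrate(objectPts, imagePtsLeft, imagePtsRight,
        //                                               K1, D1, K2, D2, imgSize, R, T,
        //                                               cv::fisheye::CALIB_FIX_INTRINSIC);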
   1053 
   1054 namespace cv{ namespace {
   1055 void subMatrix(const Mat& src, Mat& dst, const std::vector<int>& cols, const std::vector<int>& rows)
   1056 {
   1057     CV_Assert(src.type() == CV_64FC1);
   1058 
   1059     int nonzeros_cols = cv::countNonZero(cols);
   1060     Mat tmp(src.rows, nonzeros_cols, CV_64FC1);
   1061 
   1062     for (int i = 0, j = 0; i < (int)cols.size(); i++)
   1063     {
   1064         if (cols[i])
   1065         {
   1066             src.col(i).copyTo(tmp.col(j++));
   1067         }
   1068     }
   1069 
   1070     int nonzeros_rows  = cv::countNonZero(rows);
   1071     Mat tmp1(nonzeros_rows, nonzeros_cols, CV_64FC1);
   1072     for (int i = 0, j = 0; i < (int)rows.size(); i++)
   1073     {
   1074         if (rows[i])
   1075         {
   1076             tmp.row(i).copyTo(tmp1.row(j++));
   1077         }
   1078     }
   1079 
   1080     dst = tmp1.clone();
   1081 }
   1082 
   1083 }}
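
        // Editor's note: subMatrix keeps the columns and rows whose mask entries are
        // non-zero. For a 3x4 CV_64FC1 src with cols = {1,0,1,1} and rows = {1,1,0},
        // dst becomes the 2x3 matrix formed by columns 0, 2, 3 of rows 0 and 1.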
   1084 
   1085 cv::internal::IntrinsicParams::IntrinsicParams():
   1086     f(Vec2d::all(0)), c(Vec2d::all(0)), k(Vec4d::all(0)), alpha(0), isEstimate(9,0)
   1087 {
   1088 }
   1089 
   1090 cv::internal::IntrinsicParams::IntrinsicParams(Vec2d _f, Vec2d _c, Vec4d _k, double _alpha):
   1091     f(_f), c(_c), k(_k), alpha(_alpha), isEstimate(9,0)
   1092 {
   1093 }
   1094 
   1095 cv::internal::IntrinsicParams cv::internal::IntrinsicParams::operator+(const Mat& a)
   1096 {
   1097     CV_Assert(a.type() == CV_64FC1);
   1098     IntrinsicParams tmp;
   1099     const double* ptr = a.ptr<double>();
   1100 
   1101     int j = 0;
   1102     tmp.f[0]    = this->f[0]    + (isEstimate[0] ? ptr[j++] : 0);
   1103     tmp.f[1]    = this->f[1]    + (isEstimate[1] ? ptr[j++] : 0);
   1104     tmp.c[0]    = this->c[0]    + (isEstimate[2] ? ptr[j++] : 0);
   1105     tmp.alpha   = this->alpha   + (isEstimate[4] ? ptr[j++] : 0);
   1106     tmp.c[1]    = this->c[1]    + (isEstimate[3] ? ptr[j++] : 0);
   1107     tmp.k[0]    = this->k[0]    + (isEstimate[5] ? ptr[j++] : 0);
   1108     tmp.k[1]    = this->k[1]    + (isEstimate[6] ? ptr[j++] : 0);
   1109     tmp.k[2]    = this->k[2]    + (isEstimate[7] ? ptr[j++] : 0);
   1110     tmp.k[3]    = this->k[3]    + (isEstimate[8] ? ptr[j++] : 0);
   1111 
   1112     tmp.isEstimate = isEstimate;
   1113     return tmp;
   1114 }
   1115 
   1116 cv::internal::IntrinsicParams& cv::internal::IntrinsicParams::operator =(const Mat& a)
   1117 {
   1118     CV_Assert(a.type() == CV_64FC1);
   1119     const double* ptr = a.ptr<double>();
   1120 
   1121     int j = 0;
   1122 
   1123     this->f[0]  = isEstimate[0] ? ptr[j++] : 0;
   1124     this->f[1]  = isEstimate[1] ? ptr[j++] : 0;
   1125     this->c[0]  = isEstimate[2] ? ptr[j++] : 0;
   1126     this->c[1]  = isEstimate[3] ? ptr[j++] : 0;
   1127     this->alpha = isEstimate[4] ? ptr[j++] : 0;
   1128     this->k[0]  = isEstimate[5] ? ptr[j++] : 0;
   1129     this->k[1]  = isEstimate[6] ? ptr[j++] : 0;
   1130     this->k[2]  = isEstimate[7] ? ptr[j++] : 0;
   1131     this->k[3]  = isEstimate[8] ? ptr[j++] : 0;
   1132 
   1133     return *this;
   1134 }
   1135 
   1136 void cv::internal::IntrinsicParams::Init(const cv::Vec2d& _f, const cv::Vec2d& _c, const cv::Vec4d& _k, const double& _alpha)
   1137 {
   1138     this->c = _c;
   1139     this->f = _f;
   1140     this->k = _k;
   1141     this->alpha = _alpha;
   1142 }
   1143 
void cv::internal::projectPoints(cv::InputArray objectPoints, cv::OutputArray imagePoints,
                   cv::InputArray _rvec, cv::InputArray _tvec,
                   const IntrinsicParams& param, cv::OutputArray jacobian)
{
    CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
    Matx33d K(param.f[0], param.f[0] * param.alpha, param.c[0],
                       0,               param.f[1], param.c[1],
                       0,                        0,         1);
    fisheye::projectPoints(objectPoints, imagePoints, _rvec, _tvec, K, param.k, param.alpha, jacobian);
}

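// Gauss-Newton refinement of a single view's pose (rvec, tvec): each iteration
// extracts the six pose columns of the projection Jacobian, solves for a step in
// the least-squares sense, and stops once the relative step falls below 1e-10,
// MaxIter is reached, or the Jacobian's condition number exceeds thresh_cond.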
void cv::internal::ComputeExtrinsicRefine(const Mat& imagePoints, const Mat& objectPoints, Mat& rvec,
                            Mat& tvec, Mat& J, const int MaxIter,
                            const IntrinsicParams& param, const double thresh_cond)
{
    CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
    CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);
    Vec6d extrinsics(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2),
                     tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
    double change = 1;
    int iter = 0;

    while (change > 1e-10 && iter < MaxIter)
    {
        std::vector<Point2d> x;
        Mat jacobians;
        projectPoints(objectPoints, x, rvec, tvec, param, jacobians);

        Mat ex = imagePoints - Mat(x).t();
        ex = ex.reshape(1, 2);

        J = jacobians.colRange(8, 14).clone();

        SVD svd(J, SVD::NO_UV);
        double condJJ = svd.w.at<double>(0)/svd.w.at<double>(5);

        if (condJJ > thresh_cond)
            change = 0;
        else
        {
            Vec6d param_innov;
            solve(J, ex.reshape(1, (int)ex.total()), param_innov, DECOMP_SVD + DECOMP_NORMAL);

            Vec6d param_up = extrinsics + param_innov;
            change = norm(param_innov)/norm(param_up);
            extrinsics = param_up;
            iter = iter + 1;

            rvec = Mat(Vec3d(extrinsics.val));
            tvec = Mat(Vec3d(extrinsics.val+3));
        }
    }
}

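// Normalized DLT estimation of the homography mapping the model points M to the
// image points m (apparently a direct port of the corresponding MATLAB
// calibration-toolbox routine): both point sets are made homogeneous, the image
// points are centered and scaled by Hnorm, every correspondence contributes two
// rows to a 2*Np x 9 system solved via SVD, and for Np > 4 the result is refined
// by ten Gauss-Newton iterations over the eight free entries of H (H(2,2) = 1).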
cv::Mat cv::internal::ComputeHomography(Mat m, Mat M)
{
    int Np = m.cols;

    if (m.rows < 3)
    {
        vconcat(m, Mat::ones(1, Np, CV_64FC1), m);
    }
    if (M.rows < 3)
    {
        vconcat(M, Mat::ones(1, Np, CV_64FC1), M);
    }

    divide(m, Mat::ones(3, 1, CV_64FC1) * m.row(2), m);
    divide(M, Mat::ones(3, 1, CV_64FC1) * M.row(2), M);

    Mat ax = m.row(0).clone();
    Mat ay = m.row(1).clone();

    double mxx = mean(ax)[0];
    double myy = mean(ay)[0];

    ax = ax - mxx;
    ay = ay - myy;

    double scxx = mean(abs(ax))[0];
    double scyy = mean(abs(ay))[0];

    Mat Hnorm (Matx33d( 1/scxx,    0.0, -mxx/scxx,
                           0.0, 1/scyy, -myy/scyy,
                           0.0,    0.0,       1.0 ));

    Mat inv_Hnorm (Matx33d( scxx,    0, mxx,
                               0, scyy, myy,
                               0,    0,   1 ));
    Mat mn = Hnorm * m;

    Mat L = Mat::zeros(2*Np, 9, CV_64FC1);

    for (int i = 0; i < Np; ++i)
    {
        for (int j = 0; j < 3; j++)
        {
            L.at<double>(2 * i, j) = M.at<double>(j, i);
            L.at<double>(2 * i + 1, j + 3) = M.at<double>(j, i);
            L.at<double>(2 * i, j + 6) = -mn.at<double>(0,i) * M.at<double>(j, i);
            L.at<double>(2 * i + 1, j + 6) = -mn.at<double>(1,i) * M.at<double>(j, i);
        }
    }

    if (Np > 4) L = L.t() * L;
    SVD svd(L);
    Mat hh = svd.vt.row(8) / svd.vt.row(8).at<double>(8);
    Mat Hrem = hh.reshape(1, 3);
    Mat H = inv_Hnorm * Hrem;

    if (Np > 4)
    {
        Mat hhv = H.reshape(1, 9)(Rect(0, 0, 1, 8)).clone();
        for (int iter = 0; iter < 10; iter++)
        {
            Mat mrep = H * M;
            Mat J = Mat::zeros(2 * Np, 8, CV_64FC1);
            Mat MMM;
            divide(M, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 2, mrep.cols, 1)), MMM);
            divide(mrep, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 2, mrep.cols, 1)), mrep);
            Mat m_err = m(Rect(0,0, m.cols, 2)) - mrep(Rect(0,0, mrep.cols, 2));
            m_err = Mat(m_err.t()).reshape(1, m_err.cols * m_err.rows);
            Mat MMM2, MMM3;
            multiply(Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 0, mrep.cols, 1)), MMM, MMM2);
            multiply(Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 1, mrep.cols, 1)), MMM, MMM3);

            for (int i = 0; i < Np; ++i)
            {
                for (int j = 0; j < 3; ++j)
                {
                    J.at<double>(2 * i, j)         = -MMM.at<double>(j, i);
                    J.at<double>(2 * i + 1, j + 3) = -MMM.at<double>(j, i);
                }

                for (int j = 0; j < 2; ++j)
                {
                    J.at<double>(2 * i, j + 6)     = MMM2.at<double>(j, i);
                    J.at<double>(2 * i + 1, j + 6) = MMM3.at<double>(j, i);
                }
            }
            divide(M, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0,2,mrep.cols,1)), MMM);
            Mat hh_innov = (J.t() * J).inv() * (J.t()) * m_err;
            Mat hhv_up = hhv - hh_innov;
            Mat tmp;
            vconcat(hhv_up, Mat::ones(1,1,CV_64FC1), tmp);
            Mat H_up = tmp.reshape(1,3);
            hhv = hhv_up;
            H = H_up;
        }
    }
    return H;
}

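// Maps pixel coordinates to normalized, distortion-free coordinates: subtract the
// principal point, divide by the focal lengths, remove the skew term, then undo
// the fisheye distortion with an identity camera matrix.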
cv::Mat cv::internal::NormalizePixels(const Mat& imagePoints, const IntrinsicParams& param)
{
    CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);

    Mat distorted((int)imagePoints.total(), 1, CV_64FC2), undistorted;
    const Vec2d* ptr = imagePoints.ptr<Vec2d>(0);
    Vec2d* ptr_d = distorted.ptr<Vec2d>(0);
    for (size_t i = 0; i < imagePoints.total(); ++i)
    {
        ptr_d[i] = (ptr[i] - param.c).mul(Vec2d(1.0 / param.f[0], 1.0 / param.f[1]));
        ptr_d[i][0] = ptr_d[i][0] - param.alpha * ptr_d[i][1];
    }
    cv::fisheye::undistortPoints(distorted, undistorted, Matx33d::eye(), param.k);
    return undistorted;
}

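// Initial pose for one view of a planar target: the object points are rotated
// into their best-fit plane (SVD of their covariance), a homography is estimated
// between the normalized image points and the in-plane coordinates, its first two
// columns are orthonormalized into a rotation, and the third column gives the
// translation.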
void cv::internal::InitExtrinsics(const Mat& _imagePoints, const Mat& _objectPoints, const IntrinsicParams& param, Mat& omckk, Mat& Tckk)
{
    CV_Assert(!_objectPoints.empty() && _objectPoints.type() == CV_64FC3);
    CV_Assert(!_imagePoints.empty() && _imagePoints.type() == CV_64FC2);

    Mat imagePointsNormalized = NormalizePixels(_imagePoints.t(), param).reshape(1).t();
    Mat objectPoints = Mat(_objectPoints.t()).reshape(1).t();
    Mat objectPointsMean, covObjectPoints;
    Mat Rckk;
    int Np = imagePointsNormalized.cols;
    calcCovarMatrix(objectPoints, covObjectPoints, objectPointsMean, COVAR_NORMAL | COVAR_COLS);
    SVD svd(covObjectPoints);
    Mat R(svd.vt);
    if (norm(R(Rect(2, 0, 1, 2))) < 1e-6)
        R = Mat::eye(3,3, CV_64FC1);
    if (determinant(R) < 0)
        R = -R;
    Mat T = -R * objectPointsMean;
    Mat X_new = R * objectPoints + T * Mat::ones(1, Np, CV_64FC1);
    Mat H = ComputeHomography(imagePointsNormalized, X_new(Rect(0,0,X_new.cols,2)));
    double sc = .5 * (norm(H.col(0)) + norm(H.col(1)));
    H = H / sc;
    Mat u1 = H.col(0).clone();
    u1 = u1 / norm(u1);
    Mat u2 = H.col(1).clone() - u1.dot(H.col(1).clone()) * u1;
    u2 = u2 / norm(u2);
    Mat u3 = u1.cross(u2);
    Mat RRR;
    hconcat(u1, u2, RRR);
    hconcat(RRR, u3, RRR);
    Rodrigues(RRR, omckk);
    Rodrigues(omckk, Rckk);
    Tckk = H.col(2).clone();
    Tckk = Tckk + Rckk * T;
    Rckk = Rckk * R;
    Rodrigues(Rckk, omckk);
}

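// Runs InitExtrinsics followed by ComputeExtrinsicRefine for every view and
// stores the resulting rotation vectors and translations as columns of omc and
// Tc; with check_cond set, a view whose pose Jacobian is ill-conditioned triggers
// an assertion failure.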
void cv::internal::CalibrateExtrinsics(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                         const IntrinsicParams& param, const int check_cond,
                         const double thresh_cond, InputOutputArray omc, InputOutputArray Tc)
{
    CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
    CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2));
    CV_Assert(omc.type() == CV_64FC3 || Tc.type() == CV_64FC3);

    if (omc.empty()) omc.create(1, (int)objectPoints.total(), CV_64FC3);
    if (Tc.empty()) Tc.create(1, (int)objectPoints.total(), CV_64FC3);

    const int maxIter = 20;

    for(int image_idx = 0; image_idx < (int)imagePoints.total(); ++image_idx)
    {
        Mat omckk, Tckk, JJ_kk;
        Mat image, object;

        objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
        imagePoints.getMat(image_idx).convertTo(image, CV_64FC2);

        InitExtrinsics(image, object, param, omckk, Tckk);

        ComputeExtrinsicRefine(image, object, omckk, Tckk, JJ_kk, maxIter, param, thresh_cond);
        if (check_cond)
        {
            SVD svd(JJ_kk, SVD::NO_UV);
            CV_Assert(svd.w.at<double>(0) / svd.w.at<double>((int)svd.w.total() - 1) < thresh_cond);
        }
        omckk.reshape(3,1).copyTo(omc.getMat().col(image_idx));
        Tckk.reshape(3,1).copyTo(Tc.getMat().col(image_idx));
    }
}

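// Builds the normal equations of the full calibration problem. For n views the
// stacked Jacobian has 9 intrinsic columns plus 6 pose columns per view, so
// JJ3 = J^T J is (9+6n)x(9+6n) with a dense 9x9 intrinsic block (A*A^T), one 6x6
// block per view (B*B^T) and A*B^T cross terms; ex3 = J^T * residuals is
// accumulated alongside. Rows/columns of parameters that are not estimated are
// dropped via subMatrix before the matrix is inverted into JJ2_inv.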
void cv::internal::ComputeJacobians(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                      const IntrinsicParams& param, InputArray omc, InputArray Tc,
                      const int& check_cond, const double& thresh_cond, Mat& JJ2_inv, Mat& ex3)
{
    CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
    CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2));

    CV_Assert(!omc.empty() && omc.type() == CV_64FC3);
    CV_Assert(!Tc.empty() && Tc.type() == CV_64FC3);

    int n = (int)objectPoints.total();

    Mat JJ3 = Mat::zeros(9 + 6 * n, 9 + 6 * n, CV_64FC1);
    ex3 = Mat::zeros(9 + 6 * n, 1, CV_64FC1);

    for (int image_idx = 0; image_idx < n; ++image_idx)
    {
        Mat image, object;
        objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
        imagePoints.getMat(image_idx).convertTo(image, CV_64FC2);

        Mat om(omc.getMat().col(image_idx)), T(Tc.getMat().col(image_idx));

        std::vector<Point2d> x;
        Mat jacobians;
        projectPoints(object, x, om, T, param, jacobians);
        Mat exkk = image.t() - Mat(x);

        Mat A(jacobians.rows, 9, CV_64FC1);
        jacobians.colRange(0, 4).copyTo(A.colRange(0, 4));
        jacobians.col(14).copyTo(A.col(4));
        jacobians.colRange(4, 8).copyTo(A.colRange(5, 9));

        A = A.t();

        Mat B = jacobians.colRange(8, 14).clone();
        B = B.t();

        JJ3(Rect(0, 0, 9, 9)) = JJ3(Rect(0, 0, 9, 9)) + A * A.t();
        JJ3(Rect(9 + 6 * image_idx, 9 + 6 * image_idx, 6, 6)) = B * B.t();

        Mat AB = A * B.t();
        AB.copyTo(JJ3(Rect(9 + 6 * image_idx, 0, 6, 9)));

        JJ3(Rect(0, 9 + 6 * image_idx, 9, 6)) = AB.t();
        ex3(Rect(0,0,1,9)) = ex3(Rect(0,0,1,9)) + A * exkk.reshape(1, 2 * exkk.rows);

        ex3(Rect(0, 9 + 6 * image_idx, 1, 6)) = B * exkk.reshape(1, 2 * exkk.rows);

        if (check_cond)
        {
            Mat JJ_kk = B.t();
            SVD svd(JJ_kk, SVD::NO_UV);
            CV_Assert(svd.w.at<double>(0) / svd.w.at<double>(svd.w.rows - 1) < thresh_cond);
        }
    }

    std::vector<int> idxs(param.isEstimate);
    idxs.insert(idxs.end(), 6 * n, 1);

    subMatrix(JJ3, JJ3, idxs, idxs);
    subMatrix(ex3, ex3, std::vector<int>(1, 1), idxs);
    JJ2_inv = JJ3.inv();
}

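// Estimates the standard deviation of the reprojection residuals over all views
// and, from the inverse normal matrix, reports 3-sigma uncertainties for the
// estimated intrinsics, plus the overall RMS reprojection error.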
void cv::internal::EstimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                           const IntrinsicParams& params, InputArray omc, InputArray Tc,
                           IntrinsicParams& errors, Vec2d& std_err, double thresh_cond, int check_cond, double& rms)
{
    CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
    CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2));

    CV_Assert(!omc.empty() && omc.type() == CV_64FC3);
    CV_Assert(!Tc.empty() && Tc.type() == CV_64FC3);

    Mat ex((int)(objectPoints.getMat(0).total() * objectPoints.total()), 1, CV_64FC2);

    for (int image_idx = 0; image_idx < (int)objectPoints.total(); ++image_idx)
    {
        Mat image, object;
        objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
        imagePoints.getMat(image_idx).convertTo(image, CV_64FC2);

        Mat om(omc.getMat().col(image_idx)), T(Tc.getMat().col(image_idx));

        std::vector<Point2d> x;
        projectPoints(object, x, om, T, params, noArray());
        Mat ex_ = image.t() - Mat(x);
        ex_.copyTo(ex.rowRange(ex_.rows * image_idx, ex_.rows * (image_idx + 1)));
    }

    meanStdDev(ex, noArray(), std_err);
    std_err *= sqrt((double)ex.total()/((double)ex.total() - 1.0));

    Mat sigma_x;
    meanStdDev(ex.reshape(1, 1), noArray(), sigma_x);
    sigma_x *= sqrt(2.0 * (double)ex.total()/(2.0 * (double)ex.total() - 1.0));

    Mat _JJ2_inv, ex3;
    ComputeJacobians(objectPoints, imagePoints, params, omc, Tc, check_cond, thresh_cond, _JJ2_inv, ex3);

    Mat_<double>& JJ2_inv = (Mat_<double>&)_JJ2_inv;

    sqrt(JJ2_inv, JJ2_inv);

    double s = sigma_x.at<double>(0);
    Mat r = 3 * s * JJ2_inv.diag();
    errors = r;

    rms = 0;
    const Vec2d* ptr_ex = ex.ptr<Vec2d>();
    for (size_t i = 0; i < ex.total(); i++)
    {
        rms += ptr_ex[i][0] * ptr_ex[i][0] + ptr_ex[i][1] * ptr_ex[i][1];
    }

    rms /= (double)ex.total();
    rms = sqrt(rms);
}

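// Jacobians of the matrix product C = A*B (A is p x n, B is n x q) with respect
// to the entries of A and B, using the column-major vectorization of the ported
// MATLAB code: dvec(C)/dvec(A) = kron(B', I_p) and dvec(C)/dvec(B) = kron(I_q, A).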
void cv::internal::dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB)
{
    CV_Assert(A.getMat().cols == B.getMat().rows);
    CV_Assert(A.type() == CV_64FC1 && B.type() == CV_64FC1);

    int p = A.getMat().rows;
    int n = A.getMat().cols;
    int q = B.getMat().cols;

    dABdA.create(p * q, p * n, CV_64FC1);
    dABdB.create(p * q, q * n, CV_64FC1);

    dABdA.getMat() = Mat::zeros(p * q, p * n, CV_64FC1);
    dABdB.getMat() = Mat::zeros(p * q, q * n, CV_64FC1);

    for (int i = 0; i < q; ++i)
    {
        for (int j = 0; j < p; ++j)
        {
            int ij = j + i * p;
            for (int k = 0; k < n; ++k)
            {
                int kj = j + k * p;
                dABdA.getMat().at<double>(ij, kj) = B.getMat().at<double>(k, i);
            }
        }
    }

    for (int i = 0; i < q; ++i)
    {
        A.getMat().copyTo(dABdB.getMat().rowRange(i * p, i * p + p).colRange(i * n, i * n + n));
    }
}

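// Re-orders a Rodrigues Jacobian between OpenCV's row-major flattening of the
// rotation matrix and the transposed, column-major layout that the ported MATLAB
// code expects; both 9x3 and 3x9 inputs are handled. compose_motion below relies
// on this convention.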
void cv::internal::JRodriguesMatlab(const Mat& src, Mat& dst)
{
    Mat tmp(src.cols, src.rows, src.type());
    if (src.rows == 9)
    {
        Mat(src.row(0).t()).copyTo(tmp.col(0));
        Mat(src.row(1).t()).copyTo(tmp.col(3));
        Mat(src.row(2).t()).copyTo(tmp.col(6));
        Mat(src.row(3).t()).copyTo(tmp.col(1));
        Mat(src.row(4).t()).copyTo(tmp.col(4));
        Mat(src.row(5).t()).copyTo(tmp.col(7));
        Mat(src.row(6).t()).copyTo(tmp.col(2));
        Mat(src.row(7).t()).copyTo(tmp.col(5));
        Mat(src.row(8).t()).copyTo(tmp.col(8));
    }
    else
    {
        Mat(src.col(0).t()).copyTo(tmp.row(0));
        Mat(src.col(1).t()).copyTo(tmp.row(3));
        Mat(src.col(2).t()).copyTo(tmp.row(6));
        Mat(src.col(3).t()).copyTo(tmp.row(1));
        Mat(src.col(4).t()).copyTo(tmp.row(4));
        Mat(src.col(5).t()).copyTo(tmp.row(7));
        Mat(src.col(6).t()).copyTo(tmp.row(2));
        Mat(src.col(7).t()).copyTo(tmp.row(5));
        Mat(src.col(8).t()).copyTo(tmp.row(8));
    }
    dst = tmp.clone();
}

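// Composes two rigid motions given as rotation vectors and translations:
// R3 = R2 * R1 and T3 = R2 * T1 + T2, together with the derivatives of (om3, T3)
// with respect to (om1, T1) and (om2, T2), obtained by chaining the Rodrigues and
// matrix-product Jacobians above.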
void cv::internal::compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2,
                    Mat& om3, Mat& T3, Mat& dom3dom1, Mat& dom3dT1, Mat& dom3dom2,
                    Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2)
{
    Mat om1 = _om1.getMat();
    Mat om2 = _om2.getMat();
    Mat T1 = _T1.getMat().reshape(1, 3);
    Mat T2 = _T2.getMat().reshape(1, 3);

    //% Rotations:
    Mat R1, R2, R3, dR1dom1(9, 3, CV_64FC1), dR2dom2;
    Rodrigues(om1, R1, dR1dom1);
    Rodrigues(om2, R2, dR2dom2);
    JRodriguesMatlab(dR1dom1, dR1dom1);
    JRodriguesMatlab(dR2dom2, dR2dom2);
    R3 = R2 * R1;
    Mat dR3dR2, dR3dR1;
    dAB(R2, R1, dR3dR2, dR3dR1);
    Mat dom3dR3;
    Rodrigues(R3, om3, dom3dR3);
    JRodriguesMatlab(dom3dR3, dom3dR3);
    dom3dom1 = dom3dR3 * dR3dR1 * dR1dom1;
    dom3dom2 = dom3dR3 * dR3dR2 * dR2dom2;
    dom3dT1 = Mat::zeros(3, 3, CV_64FC1);
    dom3dT2 = Mat::zeros(3, 3, CV_64FC1);

    //% Translations:
    Mat T3t = R2 * T1;
    Mat dT3tdR2, dT3tdT1;
    dAB(R2, T1, dT3tdR2, dT3tdT1);
    Mat dT3tdom2 = dT3tdR2 * dR2dom2;
    T3 = T3t + T2;
    dT3dT1 = dT3tdT1;
    dT3dT2 = Mat::eye(3, 3, CV_64FC1);
    dT3dom2 = dT3tdom2;
    dT3dom1 = Mat::zeros(3, 3, CV_64FC1);
}

double cv::internal::median(const Mat& row)
{
    CV_Assert(row.type() == CV_64FC1);
    CV_Assert(!row.empty() && row.rows == 1);
    Mat tmp = row.clone();
    sort(tmp, tmp, 0);
    if ((int)tmp.total() % 2) return tmp.at<double>((int)tmp.total() / 2);
    else return 0.5 * (tmp.at<double>((int)tmp.total() / 2) + tmp.at<double>((int)tmp.total() / 2 - 1));
}

cv::Vec3d cv::internal::median3d(InputArray m)
{
    CV_Assert(m.depth() == CV_64F && m.getMat().rows == 1);
    Mat M = Mat(m.getMat().t()).reshape(1).t();
    return Vec3d(median(M.row(0)), median(M.row(1)), median(M.row(2)));
}