/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2002, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "_cvaux.h"

#if _MSC_VER >= 1200
#pragma warning(disable:4786) // Disable MSVC warnings in the standard library.
#pragma warning(disable:4100)
#pragma warning(disable:4512)
#endif
#include <stdio.h>
#include <map>
#include <algorithm>
#if _MSC_VER >= 1200
#pragma warning(default:4100)
#pragma warning(default:4512)
#endif

#define ARRAY_SIZEOF(a) (sizeof(a)/sizeof((a)[0]))

static void FillObjectPoints(CvPoint3D32f *obj_points, CvSize etalon_size, float square_size);
static void DrawEtalon(IplImage *img, CvPoint2D32f *corners,
                       int corner_count, CvSize etalon_size, int draw_ordered);
static void MultMatrix(float rm[4][4], const float m1[4][4], const float m2[4][4]);
static void MultVectorMatrix(float rv[4], const float v[4], const float m[4][4]);
static CvPoint3D32f ImageCStoWorldCS(const Cv3dTrackerCameraInfo &camera_info, CvPoint2D32f p);
static bool intersection(CvPoint3D32f o1, CvPoint3D32f p1,
                         CvPoint3D32f o2, CvPoint3D32f p2,
                         CvPoint3D32f &r1, CvPoint3D32f &r2);

/////////////////////////////////
// cv3dTrackerCalibrateCameras //
/////////////////////////////////
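// Determines the position and orientation of each camera from its view of a
// planar checkerboard (the "etalon") and stores the result as a combined 4x4
// transformation in camera_info[c].mat, together with the principal point.
// The detected etalon corners are drawn back into the sample images as a
// visual check. Returns true only if every camera was calibrated successfully.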
CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
                   const Cv3dTrackerCameraIntrinsics camera_intrinsics[], // size is num_cameras
                   CvSize etalon_size,
                   float square_size,
                   IplImage *samples[],                                   // size is num_cameras
                   Cv3dTrackerCameraInfo camera_info[])                   // size is num_cameras
{
    CV_FUNCNAME("cv3dTrackerCalibrateCameras");
    const int num_points = etalon_size.width * etalon_size.height;
    int cameras_done = 0;        // the number of cameras whose positions have been determined
    CvPoint3D32f *object_points = NULL; // real-world coordinates of checkerboard points
    CvPoint2D32f *points = NULL; // 2d coordinates of checkerboard points as seen by a camera
    IplImage *gray_img = NULL;   // temporary image for color conversion
    IplImage *tmp_img = NULL;    // temporary image used by FindChessboardCornerGuesses
    int c, i, j;

    if (etalon_size.width < 3 || etalon_size.height < 3)
        CV_ERROR(CV_StsBadArg, "Chess board size is invalid");

    for (c = 0; c < num_cameras; c++)
    {
        // CV_CHECK_IMAGE is not available in the cvaux library
        // so perform the checks inline.

        //CV_CALL(CV_CHECK_IMAGE(samples[c]));

        if( samples[c] == NULL )
            CV_ERROR( CV_HeaderIsNull, "Null image" );

        if( samples[c]->dataOrder != IPL_DATA_ORDER_PIXEL && samples[c]->nChannels > 1 )
            CV_ERROR( CV_BadOrder, "Unsupported image format" );

        if( samples[c]->maskROI != 0 || samples[c]->tileInfo != 0 )
            CV_ERROR( CV_StsBadArg, "Unsupported image format" );

        if( samples[c]->imageData == 0 )
            CV_ERROR( CV_BadDataPtr, "Null image data" );

        if( samples[c]->roi &&
            ((samples[c]->roi->xOffset | samples[c]->roi->yOffset
              | samples[c]->roi->width | samples[c]->roi->height) < 0 ||
             samples[c]->roi->xOffset + samples[c]->roi->width > samples[c]->width ||
             samples[c]->roi->yOffset + samples[c]->roi->height > samples[c]->height ||
             (unsigned) (samples[c]->roi->coi) > (unsigned) (samples[c]->nChannels)))
            CV_ERROR( CV_BadROISize, "Invalid ROI" );

        // End of CV_CHECK_IMAGE inline expansion

        if (samples[c]->depth != IPL_DEPTH_8U)
            CV_ERROR(CV_BadDepth, "Channel depth of source image must be 8");

        if (samples[c]->nChannels != 3 && samples[c]->nChannels != 1)
            CV_ERROR(CV_BadNumChannels, "Source image must have 1 or 3 channels");
    }

    CV_CALL(object_points = (CvPoint3D32f *)cvAlloc(num_points * sizeof(CvPoint3D32f)));
    CV_CALL(points = (CvPoint2D32f *)cvAlloc(num_points * sizeof(CvPoint2D32f)));

    // fill in the real-world coordinates of the checkerboard points
    FillObjectPoints(object_points, etalon_size, square_size);

    for (c = 0; c < num_cameras; c++)
    {
        CvSize image_size = cvSize(samples[c]->width, samples[c]->height);
        IplImage *img;

        // The input samples are not required to all have the same size or color
        // format. If they have different sizes, the temporary images are
        // reallocated as necessary.
        if (samples[c]->nChannels == 3)
        {
            // convert to gray
            if (gray_img == NULL || gray_img->width != samples[c]->width ||
                gray_img->height != samples[c]->height )
            {
                if (gray_img != NULL)
                    cvReleaseImage(&gray_img);
                CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
            }

            CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));

            img = gray_img;
        }
        else
        {
            // no color conversion required
            img = samples[c];
        }

        if (tmp_img == NULL || tmp_img->width != samples[c]->width ||
            tmp_img->height != samples[c]->height )
        {
            if (tmp_img != NULL)
                cvReleaseImage(&tmp_img);
            CV_CALL(tmp_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
        }

        int count = num_points;
        bool found = cvFindChessBoardCornerGuesses(img, tmp_img, 0,
                                                   etalon_size, points, &count) != 0;
        if (count == 0)
            continue;

        // If found is true, it means all the points were found (count = num_points).
        // If found is false but count is non-zero, it means that not all points were found.

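        // Refine the detected corners to sub-pixel accuracy with a 5x5 search
        // window, stopping after 10 iterations or when the update falls below
        // 0.01 pixels.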
        cvFindCornerSubPix(img, points, count, cvSize(5,5), cvSize(-1,-1),
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f));

        // If the image origin is BL (bottom-left), fix the y coordinates
        // so they are relative to the true top of the image.
        if (samples[c]->origin == IPL_ORIGIN_BL)
        {
            for (i = 0; i < count; i++)
                points[i].y = samples[c]->height - 1 - points[i].y;
        }

        if (found)
        {
            // Make sure x coordinates are increasing and y coordinates are decreasing.
            // (The y coordinate of point (0,0) should be the greatest, because the point
            // on the checkerboard that is the origin is nearest the bottom of the image.)
            // This is done after adjusting the y coordinates according to the image origin.
            if (points[0].x > points[1].x)
            {
                // reverse points in each row
                for (j = 0; j < etalon_size.height; j++)
                {
                    CvPoint2D32f *row = &points[j*etalon_size.width];
                    for (i = 0; i < etalon_size.width/2; i++)
                        std::swap(row[i], row[etalon_size.width-i-1]);
                }
            }

            if (points[0].y < points[etalon_size.width].y)
            {
                // reverse points in each column
                for (i = 0; i < etalon_size.width; i++)
                {
                    for (j = 0; j < etalon_size.height/2; j++)
                        std::swap(points[i+j*etalon_size.width],
                                  points[i+(etalon_size.height-j-1)*etalon_size.width]);
                }
            }
        }

        DrawEtalon(samples[c], points, count, etalon_size, found);

        if (!found)
            continue;

        float rotVect[3];
        float rotMatr[9];
        float transVect[3];

        cvFindExtrinsicCameraParams(count,
                                    image_size,
                                    points,
                                    object_points,
                                    const_cast<float *>(camera_intrinsics[c].focal_length),
                                    camera_intrinsics[c].principal_point,
                                    const_cast<float *>(camera_intrinsics[c].distortion),
                                    rotVect,
                                    transVect);
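        // rotVect is a Rodrigues rotation vector and transVect is the translation
        // of the checkerboard origin in the camera frame, expressed in the same
        // units as square_size.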

        // Check result against an arbitrary limit to eliminate impossible values.
        // (If the chess board were truly that far away, the camera wouldn't be able to
        // see the squares.)
        if (transVect[0] > 1000*square_size
            || transVect[1] > 1000*square_size
            || transVect[2] > 1000*square_size)
        {
            // ignore impossible results
            continue;
        }

        CvMat rotMatrDescr = cvMat(3, 3, CV_32FC1, rotMatr);
        CvMat rotVectDescr = cvMat(3, 1, CV_32FC1, rotVect);

        /* Calc rotation matrix by Rodrigues Transform */
        cvRodrigues2( &rotVectDescr, &rotMatrDescr );

        //combine the two transformations into one matrix
        //order is important! rotations are not commutative
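        // Note on conventions: these 4x4 matrices are laid out for row-vector
        // multiplication (see MultVectorMatrix), so the translation occupies the
        // bottom row rather than the right-hand column.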
        float tmat[4][4] = { { 1.f, 0.f, 0.f, 0.f },
                             { 0.f, 1.f, 0.f, 0.f },
                             { 0.f, 0.f, 1.f, 0.f },
                             { transVect[0], transVect[1], transVect[2], 1.f } };

        float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
                             { rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
                             { rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
                             { 0.f, 0.f, 0.f, 1.f } };


        MultMatrix(camera_info[c].mat, tmat, rmat);

        // change the transformation of the cameras to put them in the world coordinate
        // system we want to work with.

        // Start with an identity matrix; then fill in the values to accomplish
        // the desired transformation.
        float smat[4][4] = { { 1.f, 0.f, 0.f, 0.f },
                             { 0.f, 1.f, 0.f, 0.f },
                             { 0.f, 0.f, 1.f, 0.f },
                             { 0.f, 0.f, 0.f, 1.f } };

        // First, reflect through the origin by inverting all three axes.
        smat[0][0] = -1.f;
        smat[1][1] = -1.f;
        smat[2][2] = -1.f;
        MultMatrix(tmat, camera_info[c].mat, smat);

        // Scale x and y coordinates by the focal length (allowing for non-square pixels
        // and/or non-symmetrical lenses).
        smat[0][0] = 1.0f / camera_intrinsics[c].focal_length[0];
        smat[1][1] = 1.0f / camera_intrinsics[c].focal_length[1];
        smat[2][2] = 1.0f;
        MultMatrix(camera_info[c].mat, smat, tmat);

        camera_info[c].principal_point = camera_intrinsics[c].principal_point;
        camera_info[c].valid = true;

        cameras_done++;
    }

exit:
    cvReleaseImage(&gray_img);
    cvReleaseImage(&tmp_img);
    cvFree(&object_points);
    cvFree(&points);

    return cameras_done == num_cameras;
}

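/* A minimal usage sketch (not part of the original source): the intrinsics
   values, image file names, and camera count below are hypothetical, and the
   field initializers assume only the members of Cv3dTrackerCameraIntrinsics
   that this file itself uses (principal_point, focal_length[2], distortion).

    Cv3dTrackerCameraIntrinsics intrinsics[2];
    Cv3dTrackerCameraInfo info[2];
    IplImage *samples[2];

    for (int c = 0; c < 2; c++)
    {
        intrinsics[c].principal_point = cvPoint2D32f(320.f, 240.f); // image center (assumed)
        intrinsics[c].focal_length[0] = 600.f;                      // fx in pixels (assumed)
        intrinsics[c].focal_length[1] = 600.f;                      // fy in pixels (assumed)
        memset(intrinsics[c].distortion, 0, sizeof(intrinsics[c].distortion));
    }

    samples[0] = cvLoadImage("cam0.png", 1);   // hypothetical file names
    samples[1] = cvLoadImage("cam1.png", 1);

    // 6x8 inner-corner checkerboard with 25 mm squares; camera positions come
    // back in the same units as square_size.
    CvBool ok = cv3dTrackerCalibrateCameras(2, intrinsics, cvSize(6, 8), 25.f,
                                            samples, info);
*/
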
// fill in the real-world coordinates of the checkerboard points
static void FillObjectPoints(CvPoint3D32f *obj_points, CvSize etalon_size, float square_size)
{
    int x, y, i;

    for (y = 0, i = 0; y < etalon_size.height; y++)
    {
        for (x = 0; x < etalon_size.width; x++, i++)
        {
            obj_points[i].x = square_size * x;
            obj_points[i].y = square_size * y;
            obj_points[i].z = 0;
        }
    }
}


// Mark the points found on the input image
// The marks are drawn multi-colored if all the points were found.
static void DrawEtalon(IplImage *img, CvPoint2D32f *corners,
                       int corner_count, CvSize etalon_size, int draw_ordered)
{
    const int r = 4;
    int i;
    int x, y;
    CvPoint prev_pt = { 0, 0 };
    static const CvScalar rgb_colors[] = {
        {{0,0,255}},
        {{0,128,255}},
        {{0,200,200}},
        {{0,255,0}},
        {{200,200,0}},
        {{255,0,0}},
        {{255,0,255}} };
    static const CvScalar gray_colors[] = {
        {{80}}, {{120}}, {{160}}, {{200}}, {{100}}, {{140}}, {{180}}
    };
    const CvScalar* colors = img->nChannels == 3 ? rgb_colors : gray_colors;

    CvScalar color = colors[0];
    for (y = 0, i = 0; y < etalon_size.height; y++)
    {
        if (draw_ordered)
            color = colors[y % ARRAY_SIZEOF(rgb_colors)];

        for (x = 0; x < etalon_size.width && i < corner_count; x++, i++)
        {
            CvPoint pt;
            pt.x = cvRound(corners[i].x);
            pt.y = cvRound(corners[i].y);
            if (img->origin == IPL_ORIGIN_BL)
                pt.y = img->height - 1 - pt.y;

            if (draw_ordered)
            {
                if (i != 0)
                   cvLine(img, prev_pt, pt, color, 1, CV_AA);
                prev_pt = pt;
            }

            cvLine( img, cvPoint(pt.x - r, pt.y - r),
                    cvPoint(pt.x + r, pt.y + r), color, 1, CV_AA );
            cvLine( img, cvPoint(pt.x - r, pt.y + r),
                    cvPoint(pt.x + r, pt.y - r), color, 1, CV_AA );
            cvCircle( img, pt, r+1, color, 1, CV_AA );
        }
    }
}

// Find the midpoint of the line segment between two points.
static CvPoint3D32f midpoint(const CvPoint3D32f &p1, const CvPoint3D32f &p2)
{
    return cvPoint3D32f((p1.x+p2.x)/2, (p1.y+p2.y)/2, (p1.z+p2.z)/2);
}

static void operator +=(CvPoint3D32f &p1, const CvPoint3D32f &p2)
{
    p1.x += p2.x;
    p1.y += p2.y;
    p1.z += p2.z;
}

static CvPoint3D32f operator /(const CvPoint3D32f &p, int d)
{
    return cvPoint3D32f(p.x/d, p.y/d, p.z/d);
}

static const Cv3dTracker2dTrackedObject *find(const Cv3dTracker2dTrackedObject v[], int num_objects, int id)
{
    for (int i = 0; i < num_objects; i++)
    {
        if (v[i].id == id)
            return &v[i];
    }
    return NULL;
}

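// Extract a camera's position in world coordinates from the bottom (translation)
// row of its 4x4 transformation matrix.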
#define CAMERA_POS(c) (cvPoint3D32f((c).mat[3][0], (c).mat[3][1], (c).mat[3][2]))

//////////////////////////////
// cv3dTrackerLocateObjects //
//////////////////////////////
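// For every object reported by at least two calibrated cameras, cast a ray in
// world space from each such camera through the object's image position,
// intersect the rays pairwise (or take the points of closest approach), and
// average the resulting midpoints to estimate the object's 3D position.
// Returns the number of objects written to tracked_objects.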
CV_IMPL int  cv3dTrackerLocateObjects(int num_cameras, int num_objects,
                 const Cv3dTrackerCameraInfo camera_info[],      // size is num_cameras
                 const Cv3dTracker2dTrackedObject tracking_info[], // size is num_objects*num_cameras
                 Cv3dTrackerTrackedObject tracked_objects[])     // size is num_objects
{
    /*CV_FUNCNAME("cv3dTrackerLocateObjects");*/
    int found_objects = 0;

    // count how many cameras could see each object
    std::map<int, int> count;
    for (int c = 0; c < num_cameras; c++)
    {
        if (!camera_info[c].valid)
            continue;

        for (int i = 0; i < num_objects; i++)
        {
            const Cv3dTracker2dTrackedObject *o = &tracking_info[c*num_objects+i];
            if (o->id != -1)
                count[o->id]++;
        }
    }

    // process each object that was seen by at least two cameras
    for (std::map<int, int>::iterator i = count.begin(); i != count.end(); i++)
    {
        if (i->second < 2)
            continue; // ignore object seen by only one camera
        int id = i->first;

        // find an approximation of the object's location for each pair of cameras that
        // could see this object, and average them
        CvPoint3D32f total = cvPoint3D32f(0, 0, 0);
        int weight = 0;

        for (int c1 = 0; c1 < num_cameras-1; c1++)
        {
            if (!camera_info[c1].valid)
                continue;

            const Cv3dTracker2dTrackedObject *o1 = find(&tracking_info[c1*num_objects],
                                                        num_objects, id);
            if (o1 == NULL)
                continue; // this camera didn't see this object

            CvPoint3D32f p1a = CAMERA_POS(camera_info[c1]);
            CvPoint3D32f p1b = ImageCStoWorldCS(camera_info[c1], o1->p);

            for (int c2 = c1 + 1; c2 < num_cameras; c2++)
            {
                if (!camera_info[c2].valid)
                    continue;

                const Cv3dTracker2dTrackedObject *o2 = find(&tracking_info[c2*num_objects],
                                                            num_objects, id);
                if (o2 == NULL)
                    continue; // this camera didn't see this object

                CvPoint3D32f p2a = CAMERA_POS(camera_info[c2]);
                CvPoint3D32f p2b = ImageCStoWorldCS(camera_info[c2], o2->p);

                // these variables are initialized simply to avoid spurious warnings
                // from the compiler
                CvPoint3D32f r1 = cvPoint3D32f(0, 0, 0);
                CvPoint3D32f r2 = cvPoint3D32f(0, 0, 0);

                // find the intersection of the two lines (or the points of closest
                // approach, if they don't intersect)
                if (!intersection(p1a, p1b, p2a, p2b, r1, r2))
                    continue;

                total += midpoint(r1, r2);
                weight++;
            }
        }

        if (weight == 0)
            continue; // every usable pair of rays was (nearly) parallel; no estimate possible

        CvPoint3D32f center = total/weight;
        tracked_objects[found_objects++] = cv3dTrackerTrackedObject(id, center);
    }

    return found_objects;
}

#define EPS 1e-9

// Compute the determinant of the 3x3 matrix represented by 3 row vectors.
static inline double det(CvPoint3D32f v1, CvPoint3D32f v2, CvPoint3D32f v3)
{
    return v1.x*v2.y*v3.z + v1.z*v2.x*v3.y + v1.y*v2.z*v3.x
           - v1.z*v2.y*v3.x - v1.x*v2.z*v3.y - v1.y*v2.x*v3.z;
}

static CvPoint3D32f operator +(CvPoint3D32f a, CvPoint3D32f b)
{
    return cvPoint3D32f(a.x + b.x, a.y + b.y, a.z + b.z);
}

static CvPoint3D32f operator -(CvPoint3D32f a, CvPoint3D32f b)
{
    return cvPoint3D32f(a.x - b.x, a.y - b.y, a.z - b.z);
}

static CvPoint3D32f operator *(CvPoint3D32f v, double f)
{
    return cvPoint3D32f(f*v.x, f*v.y, f*v.z);
}


// Find the intersection of two lines, or if they don't intersect,
// the points of closest approach.
// The lines are defined by (o1,p1) and (o2, p2).
// If they intersect, r1 and r2 will be the same.
// Returns false on error.
static bool intersection(CvPoint3D32f o1, CvPoint3D32f p1,
                         CvPoint3D32f o2, CvPoint3D32f p2,
                         CvPoint3D32f &r1, CvPoint3D32f &r2)
{
    CvPoint3D32f x = o2 - o1;
    CvPoint3D32f d1 = p1 - o1;
    CvPoint3D32f d2 = p2 - o2;

    CvPoint3D32f cross = cvPoint3D32f(d1.y*d2.z - d1.z*d2.y,
                                      d1.z*d2.x - d1.x*d2.z,
                                      d1.x*d2.y - d1.y*d2.x);
    double den = cross.x*cross.x + cross.y*cross.y + cross.z*cross.z;

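    // den is the squared length of d1 x d2; it is (nearly) zero when the two
    // direction vectors are (nearly) parallel, in which case there is no unique
    // pair of closest points.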
    if (den < EPS)
        return false;

    double t1 = det(x, d2, cross) / den;
    double t2 = det(x, d1, cross) / den;
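    // t1 and t2 are the parameters of the closest points along d1 and d2,
    // obtained by requiring that the segment joining the two points be
    // perpendicular to both lines (i.e. parallel to d1 x d2).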

    r1 = o1 + d1 * t1;
    r2 = o2 + d2 * t2;

    return true;
}

// Convert point p from image coordinates to world coordinates by transforming
// it with the camera matrix.
static CvPoint3D32f ImageCStoWorldCS(const Cv3dTrackerCameraInfo &camera_info, CvPoint2D32f p)
{
    float tp[4];
    tp[0] = (float)p.x - camera_info.principal_point.x;
    tp[1] = (float)p.y - camera_info.principal_point.y;
    tp[2] = 1.f;
    tp[3] = 1.f;

    float tr[4];
    //multiply tp by mat to get tr
    MultVectorMatrix(tr, tp, camera_info.mat);

    return cvPoint3D32f(tr[0]/tr[3], tr[1]/tr[3], tr[2]/tr[3]);
}

// Multiply affine transformation m1 by the affine transformation m2 and
// return the result in rm.
static void MultMatrix(float rm[4][4], const float m1[4][4], const float m2[4][4])
{
    for (int i=0; i<=3; i++)
        for (int j=0; j<=3; j++)
        {
            rm[i][j]= 0.0;
            for (int k=0; k <= 3; k++)
                rm[i][j] += m1[i][k]*m2[k][j];
        }
}

// Multiply the vector v by the affine transformation matrix m and return the
// result in rv.
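// (v is treated as a row vector, so rv = v * m; this matches the row-vector
// layout of the 4x4 matrices built in cv3dTrackerCalibrateCameras.)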
static void MultVectorMatrix(float rv[4], const float v[4], const float m[4][4])
{
    for (int i=0; i<=3; i++)
    {
        rv[i] = 0.f;
        for (int j = 0; j <= 3; j++)
            rv[i] += v[j] * m[j][i];
    }
}
    589