1 #include "precomp.hpp" 2 #include <float.h> 3 #include <limits.h> 4 5 #ifdef HAVE_TEGRA_OPTIMIZATION 6 #include "tegra.hpp" 7 #endif 8 9 using namespace cv; 10 11 namespace cvtest 12 { 13 14 const char* getTypeName( int type ) 15 { 16 static const char* type_names[] = { "8u", "8s", "16u", "16s", "32s", "32f", "64f", "ptr" }; 17 return type_names[CV_MAT_DEPTH(type)]; 18 } 19 20 int typeByName( const char* name ) 21 { 22 int i; 23 for( i = 0; i < CV_DEPTH_MAX; i++ ) 24 if( strcmp(name, getTypeName(i)) == 0 ) 25 return i; 26 return -1; 27 } 28 29 string vec2str( const string& sep, const int* v, size_t nelems ) 30 { 31 char buf[32]; 32 string result = ""; 33 for( size_t i = 0; i < nelems; i++ ) 34 { 35 sprintf(buf, "%d", v[i]); 36 result += string(buf); 37 if( i < nelems - 1 ) 38 result += sep; 39 } 40 return result; 41 } 42 43 44 Size randomSize(RNG& rng, double maxSizeLog) 45 { 46 double width_log = rng.uniform(0., maxSizeLog); 47 double height_log = rng.uniform(0., maxSizeLog - width_log); 48 if( (unsigned)rng % 2 != 0 ) 49 std::swap(width_log, height_log); 50 Size sz; 51 sz.width = cvRound(exp(width_log)); 52 sz.height = cvRound(exp(height_log)); 53 return sz; 54 } 55 56 void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz) 57 { 58 int i, dims = rng.uniform(minDims, maxDims+1); 59 sz.resize(dims); 60 for( i = 0; i < dims; i++ ) 61 { 62 double v = rng.uniform(0., maxSizeLog); 63 maxSizeLog -= v; 64 sz[i] = cvRound(exp(v)); 65 } 66 for( i = 0; i < dims; i++ ) 67 { 68 int j = rng.uniform(0, dims); 69 int k = rng.uniform(0, dims); 70 std::swap(sz[j], sz[k]); 71 } 72 } 73 74 int randomType(RNG& rng, int typeMask, int minChannels, int maxChannels) 75 { 76 int channels = rng.uniform(minChannels, maxChannels+1); 77 int depth = 0; 78 CV_Assert((typeMask & _OutputArray::DEPTH_MASK_ALL) != 0); 79 for(;;) 80 { 81 depth = rng.uniform(CV_8U, CV_64F+1); 82 if( ((1 << depth) & typeMask) != 0 ) 83 break; 84 } 85 return CV_MAKETYPE(depth, channels); 86 } 87 88 double getMinVal(int depth) 89 { 90 depth = CV_MAT_DEPTH(depth); 91 double val = depth == CV_8U ? 0 : depth == CV_8S ? SCHAR_MIN : depth == CV_16U ? 0 : 92 depth == CV_16S ? SHRT_MIN : depth == CV_32S ? INT_MIN : 93 depth == CV_32F ? -FLT_MAX : depth == CV_64F ? -DBL_MAX : -1; 94 CV_Assert(val != -1); 95 return val; 96 } 97 98 double getMaxVal(int depth) 99 { 100 depth = CV_MAT_DEPTH(depth); 101 double val = depth == CV_8U ? UCHAR_MAX : depth == CV_8S ? SCHAR_MAX : depth == CV_16U ? USHRT_MAX : 102 depth == CV_16S ? SHRT_MAX : depth == CV_32S ? INT_MAX : 103 depth == CV_32F ? FLT_MAX : depth == CV_64F ? DBL_MAX : -1; 104 CV_Assert(val != -1); 105 return val; 106 } 107 108 Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi) 109 { 110 Size size0 = size; 111 if( useRoi ) 112 { 113 size0.width += std::max(rng.uniform(0, 10) - 5, 0); 114 size0.height += std::max(rng.uniform(0, 10) - 5, 0); 115 } 116 117 Mat m(size0, type); 118 119 rng.fill(m, RNG::UNIFORM, minVal, maxVal); 120 if( size0 == size ) 121 return m; 122 return m(Rect((size0.width-size.width)/2, (size0.height-size.height)/2, size.width, size.height)); 123 } 124 125 Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi) 126 { 127 int i, dims = (int)size.size(); 128 vector<int> size0(dims); 129 vector<Range> r(dims); 130 bool eqsize = true; 131 for( i = 0; i < dims; i++ ) 132 { 133 size0[i] = size[i]; 134 r[i] = Range::all(); 135 if( useRoi ) 136 { 137 size0[i] += std::max(rng.uniform(0, 5) - 2, 0); 138 r[i] = Range((size0[i] - size[i])/2, (size0[i] - size[i])/2 + size[i]); 139 } 140 eqsize = eqsize && size[i] == size0[i]; 141 } 142 143 Mat m(dims, &size0[0], type); 144 145 rng.fill(m, RNG::UNIFORM, minVal, maxVal); 146 if( eqsize ) 147 return m; 148 return m(&r[0]); 149 } 150 151 void add(const Mat& _a, double alpha, const Mat& _b, double beta, 152 Scalar gamma, Mat& c, int ctype, bool calcAbs) 153 { 154 Mat a = _a, b = _b; 155 if( a.empty() || alpha == 0 ) 156 { 157 // both alpha and beta can be 0, but at least one of a and b must be non-empty array, 158 // otherwise we do not know the size of the output (and may be type of the output, when ctype<0) 159 CV_Assert( !a.empty() || !b.empty() ); 160 if( !b.empty() ) 161 { 162 a = b; 163 alpha = beta; 164 b = Mat(); 165 beta = 0; 166 } 167 } 168 if( b.empty() || beta == 0 ) 169 { 170 b = Mat(); 171 beta = 0; 172 } 173 else 174 CV_Assert(a.size == b.size); 175 176 if( ctype < 0 ) 177 ctype = a.depth(); 178 ctype = CV_MAKETYPE(CV_MAT_DEPTH(ctype), a.channels()); 179 c.create(a.dims, &a.size[0], ctype); 180 const Mat *arrays[] = {&a, &b, &c, 0}; 181 Mat planes[3], buf[3]; 182 183 NAryMatIterator it(arrays, planes); 184 size_t i, nplanes = it.nplanes; 185 int cn=a.channels(); 186 int total = (int)planes[0].total(), maxsize = std::min(12*12*std::max(12/cn, 1), total); 187 188 CV_Assert(planes[0].rows == 1); 189 buf[0].create(1, maxsize, CV_64FC(cn)); 190 if(!b.empty()) 191 buf[1].create(1, maxsize, CV_64FC(cn)); 192 buf[2].create(1, maxsize, CV_64FC(cn)); 193 scalarToRawData(gamma, buf[2].ptr(), CV_64FC(cn), (int)(maxsize*cn)); 194 195 for( i = 0; i < nplanes; i++, ++it) 196 { 197 for( int j = 0; j < total; j += maxsize ) 198 { 199 int j2 = std::min(j + maxsize, total); 200 Mat apart0 = planes[0].colRange(j, j2); 201 Mat cpart0 = planes[2].colRange(j, j2); 202 Mat apart = buf[0].colRange(0, j2 - j); 203 204 apart0.convertTo(apart, apart.type(), alpha); 205 size_t k, n = (j2 - j)*cn; 206 double* aptr = apart.ptr<double>(); 207 const double* gptr = buf[2].ptr<double>(); 208 209 if( b.empty() ) 210 { 211 for( k = 0; k < n; k++ ) 212 aptr[k] += gptr[k]; 213 } 214 else 215 { 216 Mat bpart0 = planes[1].colRange((int)j, (int)j2); 217 Mat bpart = buf[1].colRange(0, (int)(j2 - j)); 218 bpart0.convertTo(bpart, bpart.type(), beta); 219 const double* bptr = bpart.ptr<double>(); 220 221 for( k = 0; k < n; k++ ) 222 aptr[k] += bptr[k] + gptr[k]; 223 } 224 if( calcAbs ) 225 for( k = 0; k < n; k++ ) 226 aptr[k] = fabs(aptr[k]); 227 apart.convertTo(cpart0, cpart0.type(), 1, 0); 228 } 229 } 230 } 231 232 233 template<typename _Tp1, typename _Tp2> inline void 234 convert_(const _Tp1* src, _Tp2* dst, size_t total, double alpha, double beta) 235 { 236 size_t i; 237 if( alpha == 1 && beta == 0 ) 238 for( i = 0; i < total; i++ ) 239 dst[i] = saturate_cast<_Tp2>(src[i]); 240 else if( beta == 0 ) 241 for( i = 0; i < total; i++ ) 242 dst[i] = saturate_cast<_Tp2>(src[i]*alpha); 243 else 244 for( i = 0; i < total; i++ ) 245 dst[i] = saturate_cast<_Tp2>(src[i]*alpha + beta); 246 } 247 248 template<typename _Tp> inline void 249 convertTo(const _Tp* src, void* dst, int dtype, size_t total, double alpha, double beta) 250 { 251 switch( CV_MAT_DEPTH(dtype) ) 252 { 253 case CV_8U: 254 convert_(src, (uchar*)dst, total, alpha, beta); 255 break; 256 case CV_8S: 257 convert_(src, (schar*)dst, total, alpha, beta); 258 break; 259 case CV_16U: 260 convert_(src, (ushort*)dst, total, alpha, beta); 261 break; 262 case CV_16S: 263 convert_(src, (short*)dst, total, alpha, beta); 264 break; 265 case CV_32S: 266 convert_(src, (int*)dst, total, alpha, beta); 267 break; 268 case CV_32F: 269 convert_(src, (float*)dst, total, alpha, beta); 270 break; 271 case CV_64F: 272 convert_(src, (double*)dst, total, alpha, beta); 273 break; 274 default: 275 CV_Assert(0); 276 } 277 } 278 279 void convert(const Mat& src, cv::OutputArray _dst, int dtype, double alpha, double beta) 280 { 281 if (dtype < 0) dtype = _dst.depth(); 282 283 dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), src.channels()); 284 _dst.create(src.dims, &src.size[0], dtype); 285 Mat dst = _dst.getMat(); 286 if( alpha == 0 ) 287 { 288 set( dst, Scalar::all(beta) ); 289 return; 290 } 291 if( dtype == src.type() && alpha == 1 && beta == 0 ) 292 { 293 copy( src, dst ); 294 return; 295 } 296 297 const Mat *arrays[]={&src, &dst, 0}; 298 Mat planes[2]; 299 300 NAryMatIterator it(arrays, planes); 301 size_t total = planes[0].total()*planes[0].channels(); 302 size_t i, nplanes = it.nplanes; 303 304 for( i = 0; i < nplanes; i++, ++it) 305 { 306 const uchar* sptr = planes[0].ptr(); 307 uchar* dptr = planes[1].ptr(); 308 309 switch( src.depth() ) 310 { 311 case CV_8U: 312 convertTo((const uchar*)sptr, dptr, dtype, total, alpha, beta); 313 break; 314 case CV_8S: 315 convertTo((const schar*)sptr, dptr, dtype, total, alpha, beta); 316 break; 317 case CV_16U: 318 convertTo((const ushort*)sptr, dptr, dtype, total, alpha, beta); 319 break; 320 case CV_16S: 321 convertTo((const short*)sptr, dptr, dtype, total, alpha, beta); 322 break; 323 case CV_32S: 324 convertTo((const int*)sptr, dptr, dtype, total, alpha, beta); 325 break; 326 case CV_32F: 327 convertTo((const float*)sptr, dptr, dtype, total, alpha, beta); 328 break; 329 case CV_64F: 330 convertTo((const double*)sptr, dptr, dtype, total, alpha, beta); 331 break; 332 } 333 } 334 } 335 336 337 void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask) 338 { 339 dst.create(src.dims, &src.size[0], src.type()); 340 341 if(mask.empty()) 342 { 343 const Mat* arrays[] = {&src, &dst, 0}; 344 Mat planes[2]; 345 NAryMatIterator it(arrays, planes); 346 size_t i, nplanes = it.nplanes; 347 size_t planeSize = planes[0].total()*src.elemSize(); 348 349 for( i = 0; i < nplanes; i++, ++it ) 350 memcpy(planes[1].ptr(), planes[0].ptr(), planeSize); 351 352 return; 353 } 354 355 CV_Assert( src.size == mask.size && mask.type() == CV_8U ); 356 357 const Mat *arrays[]={&src, &dst, &mask, 0}; 358 Mat planes[3]; 359 360 NAryMatIterator it(arrays, planes); 361 size_t j, k, elemSize = src.elemSize(), total = planes[0].total(); 362 size_t i, nplanes = it.nplanes; 363 364 for( i = 0; i < nplanes; i++, ++it) 365 { 366 const uchar* sptr = planes[0].ptr(); 367 uchar* dptr = planes[1].ptr(); 368 const uchar* mptr = planes[2].ptr(); 369 370 for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize ) 371 { 372 if( (mptr[j] != 0) ^ invertMask ) 373 for( k = 0; k < elemSize; k++ ) 374 dptr[k] = sptr[k]; 375 } 376 } 377 } 378 379 380 void set(Mat& dst, const Scalar& gamma, const Mat& mask) 381 { 382 double buf[12]; 383 scalarToRawData(gamma, &buf, dst.type(), dst.channels()); 384 const uchar* gptr = (const uchar*)&buf[0]; 385 386 if(mask.empty()) 387 { 388 const Mat* arrays[] = {&dst, 0}; 389 Mat plane; 390 NAryMatIterator it(arrays, &plane); 391 size_t i, nplanes = it.nplanes; 392 size_t j, k, elemSize = dst.elemSize(), planeSize = plane.total()*elemSize; 393 394 for( k = 1; k < elemSize; k++ ) 395 if( gptr[k] != gptr[0] ) 396 break; 397 bool uniform = k >= elemSize; 398 399 for( i = 0; i < nplanes; i++, ++it ) 400 { 401 uchar* dptr = plane.ptr(); 402 if( uniform ) 403 memset( dptr, gptr[0], planeSize ); 404 else if( i == 0 ) 405 { 406 for( j = 0; j < planeSize; j += elemSize, dptr += elemSize ) 407 for( k = 0; k < elemSize; k++ ) 408 dptr[k] = gptr[k]; 409 } 410 else 411 memcpy(dptr, dst.ptr(), planeSize); 412 } 413 return; 414 } 415 416 CV_Assert( dst.size == mask.size && mask.type() == CV_8U ); 417 418 const Mat *arrays[]={&dst, &mask, 0}; 419 Mat planes[2]; 420 421 NAryMatIterator it(arrays, planes); 422 size_t j, k, elemSize = dst.elemSize(), total = planes[0].total(); 423 size_t i, nplanes = it.nplanes; 424 425 for( i = 0; i < nplanes; i++, ++it) 426 { 427 uchar* dptr = planes[0].ptr(); 428 const uchar* mptr = planes[1].ptr(); 429 430 for( j = 0; j < total; j++, dptr += elemSize ) 431 { 432 if( mptr[j] ) 433 for( k = 0; k < elemSize; k++ ) 434 dptr[k] = gptr[k]; 435 } 436 } 437 } 438 439 440 void insert(const Mat& src, Mat& dst, int coi) 441 { 442 CV_Assert( dst.size == src.size && src.depth() == dst.depth() && 443 0 <= coi && coi < dst.channels() ); 444 445 const Mat* arrays[] = {&src, &dst, 0}; 446 Mat planes[2]; 447 NAryMatIterator it(arrays, planes); 448 size_t i, nplanes = it.nplanes; 449 size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total(); 450 451 for( i = 0; i < nplanes; i++, ++it ) 452 { 453 const uchar* sptr = planes[0].ptr(); 454 uchar* dptr = planes[1].ptr() + coi*size0; 455 456 for( j = 0; j < total; j++, sptr += size0, dptr += size1 ) 457 { 458 for( k = 0; k < size0; k++ ) 459 dptr[k] = sptr[k]; 460 } 461 } 462 } 463 464 465 void extract(const Mat& src, Mat& dst, int coi) 466 { 467 dst.create( src.dims, &src.size[0], src.depth() ); 468 CV_Assert( 0 <= coi && coi < src.channels() ); 469 470 const Mat* arrays[] = {&src, &dst, 0}; 471 Mat planes[2]; 472 NAryMatIterator it(arrays, planes); 473 size_t i, nplanes = it.nplanes; 474 size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total(); 475 476 for( i = 0; i < nplanes; i++, ++it ) 477 { 478 const uchar* sptr = planes[0].ptr() + coi*size1; 479 uchar* dptr = planes[1].ptr(); 480 481 for( j = 0; j < total; j++, sptr += size0, dptr += size1 ) 482 { 483 for( k = 0; k < size1; k++ ) 484 dptr[k] = sptr[k]; 485 } 486 } 487 } 488 489 490 void transpose(const Mat& src, Mat& dst) 491 { 492 CV_Assert(src.dims == 2); 493 dst.create(src.cols, src.rows, src.type()); 494 int i, j, k, esz = (int)src.elemSize(); 495 496 for( i = 0; i < dst.rows; i++ ) 497 { 498 const uchar* sptr = src.ptr(0) + i*esz; 499 uchar* dptr = dst.ptr(i); 500 501 for( j = 0; j < dst.cols; j++, sptr += src.step[0], dptr += esz ) 502 { 503 for( k = 0; k < esz; k++ ) 504 dptr[k] = sptr[k]; 505 } 506 } 507 } 508 509 510 template<typename _Tp> static void 511 randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta) 512 { 513 for( size_t i = 0; i < total; i += cn ) 514 for( int k = 0; k < cn; k++ ) 515 { 516 int val = cvFloor( randInt(rng)*scale[k] + delta[k] ); 517 data[i + k] = saturate_cast<_Tp>(val); 518 } 519 } 520 521 522 template<typename _Tp> static void 523 randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta) 524 { 525 for( size_t i = 0; i < total; i += cn ) 526 for( int k = 0; k < cn; k++ ) 527 { 528 double val = randReal(rng)*scale[k] + delta[k]; 529 data[i + k] = saturate_cast<_Tp>(val); 530 } 531 } 532 533 534 void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 ) 535 { 536 Scalar scale = param0; 537 Scalar delta = param1; 538 double C = a.depth() < CV_32F ? 1./(65536.*65536.) : 1.; 539 540 for( int k = 0; k < 4; k++ ) 541 { 542 double s = scale.val[k] - delta.val[k]; 543 if( s >= 0 ) 544 scale.val[k] = s; 545 else 546 { 547 delta.val[k] = scale.val[k]; 548 scale.val[k] = -s; 549 } 550 scale.val[k] *= C; 551 } 552 553 const Mat *arrays[]={&a, 0}; 554 Mat plane; 555 556 NAryMatIterator it(arrays, &plane); 557 size_t i, nplanes = it.nplanes; 558 int depth = a.depth(), cn = a.channels(); 559 size_t total = plane.total()*cn; 560 561 for( i = 0; i < nplanes; i++, ++it ) 562 { 563 switch( depth ) 564 { 565 case CV_8U: 566 randUniInt_(rng, plane.ptr<uchar>(), total, cn, scale, delta); 567 break; 568 case CV_8S: 569 randUniInt_(rng, plane.ptr<schar>(), total, cn, scale, delta); 570 break; 571 case CV_16U: 572 randUniInt_(rng, plane.ptr<ushort>(), total, cn, scale, delta); 573 break; 574 case CV_16S: 575 randUniInt_(rng, plane.ptr<short>(), total, cn, scale, delta); 576 break; 577 case CV_32S: 578 randUniInt_(rng, plane.ptr<int>(), total, cn, scale, delta); 579 break; 580 case CV_32F: 581 randUniFlt_(rng, plane.ptr<float>(), total, cn, scale, delta); 582 break; 583 case CV_64F: 584 randUniFlt_(rng, plane.ptr<double>(), total, cn, scale, delta); 585 break; 586 default: 587 CV_Assert(0); 588 } 589 } 590 } 591 592 593 template<typename _Tp> static void 594 erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec) 595 { 596 int width = dst.cols*src.channels(), n = (int)ofsvec.size(); 597 const int* ofs = &ofsvec[0]; 598 599 for( int y = 0; y < dst.rows; y++ ) 600 { 601 const _Tp* sptr = src.ptr<_Tp>(y); 602 _Tp* dptr = dst.ptr<_Tp>(y); 603 604 for( int x = 0; x < width; x++ ) 605 { 606 _Tp result = sptr[x + ofs[0]]; 607 for( int i = 1; i < n; i++ ) 608 result = std::min(result, sptr[x + ofs[i]]); 609 dptr[x] = result; 610 } 611 } 612 } 613 614 615 template<typename _Tp> static void 616 dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec) 617 { 618 int width = dst.cols*src.channels(), n = (int)ofsvec.size(); 619 const int* ofs = &ofsvec[0]; 620 621 for( int y = 0; y < dst.rows; y++ ) 622 { 623 const _Tp* sptr = src.ptr<_Tp>(y); 624 _Tp* dptr = dst.ptr<_Tp>(y); 625 626 for( int x = 0; x < width; x++ ) 627 { 628 _Tp result = sptr[x + ofs[0]]; 629 for( int i = 1; i < n; i++ ) 630 result = std::max(result, sptr[x + ofs[i]]); 631 dptr[x] = result; 632 } 633 } 634 } 635 636 637 void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor, 638 int borderType, const Scalar& _borderValue) 639 { 640 //if( _src.type() == CV_16UC3 && _src.size() == Size(1, 2) ) 641 // putchar('*'); 642 Mat kernel = _kernel, src; 643 Scalar borderValue = _borderValue; 644 if( kernel.empty() ) 645 kernel = Mat::ones(3, 3, CV_8U); 646 else 647 { 648 CV_Assert( kernel.type() == CV_8U ); 649 } 650 if( anchor == Point(-1,-1) ) 651 anchor = Point(kernel.cols/2, kernel.rows/2); 652 if( borderType == BORDER_CONSTANT ) 653 borderValue = getMaxVal(src.depth()); 654 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1, 655 anchor.x, kernel.cols - anchor.x - 1, 656 borderType, borderValue); 657 dst.create( _src.size(), src.type() ); 658 659 vector<int> ofs; 660 int step = (int)(src.step/src.elemSize1()), cn = src.channels(); 661 for( int i = 0; i < kernel.rows; i++ ) 662 for( int j = 0; j < kernel.cols; j++ ) 663 if( kernel.at<uchar>(i, j) != 0 ) 664 ofs.push_back(i*step + j*cn); 665 if( ofs.empty() ) 666 ofs.push_back(anchor.y*step + anchor.x*cn); 667 668 switch( src.depth() ) 669 { 670 case CV_8U: 671 erode_<uchar>(src, dst, ofs); 672 break; 673 case CV_8S: 674 erode_<schar>(src, dst, ofs); 675 break; 676 case CV_16U: 677 erode_<ushort>(src, dst, ofs); 678 break; 679 case CV_16S: 680 erode_<short>(src, dst, ofs); 681 break; 682 case CV_32S: 683 erode_<int>(src, dst, ofs); 684 break; 685 case CV_32F: 686 erode_<float>(src, dst, ofs); 687 break; 688 case CV_64F: 689 erode_<double>(src, dst, ofs); 690 break; 691 default: 692 CV_Assert(0); 693 } 694 } 695 696 void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor, 697 int borderType, const Scalar& _borderValue) 698 { 699 Mat kernel = _kernel, src; 700 Scalar borderValue = _borderValue; 701 if( kernel.empty() ) 702 kernel = Mat::ones(3, 3, CV_8U); 703 else 704 { 705 CV_Assert( kernel.type() == CV_8U ); 706 } 707 if( anchor == Point(-1,-1) ) 708 anchor = Point(kernel.cols/2, kernel.rows/2); 709 if( borderType == BORDER_CONSTANT ) 710 borderValue = getMinVal(src.depth()); 711 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1, 712 anchor.x, kernel.cols - anchor.x - 1, 713 borderType, borderValue); 714 dst.create( _src.size(), src.type() ); 715 716 vector<int> ofs; 717 int step = (int)(src.step/src.elemSize1()), cn = src.channels(); 718 for( int i = 0; i < kernel.rows; i++ ) 719 for( int j = 0; j < kernel.cols; j++ ) 720 if( kernel.at<uchar>(i, j) != 0 ) 721 ofs.push_back(i*step + j*cn); 722 if( ofs.empty() ) 723 ofs.push_back(anchor.y*step + anchor.x*cn); 724 725 switch( src.depth() ) 726 { 727 case CV_8U: 728 dilate_<uchar>(src, dst, ofs); 729 break; 730 case CV_8S: 731 dilate_<schar>(src, dst, ofs); 732 break; 733 case CV_16U: 734 dilate_<ushort>(src, dst, ofs); 735 break; 736 case CV_16S: 737 dilate_<short>(src, dst, ofs); 738 break; 739 case CV_32S: 740 dilate_<int>(src, dst, ofs); 741 break; 742 case CV_32F: 743 dilate_<float>(src, dst, ofs); 744 break; 745 case CV_64F: 746 dilate_<double>(src, dst, ofs); 747 break; 748 default: 749 CV_Assert(0); 750 } 751 } 752 753 754 template<typename _Tp> static void 755 filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<double>& coeffvec) 756 { 757 const int* ofs = &ofsvec[0]; 758 const double* coeff = &coeffvec[0]; 759 int width = dst.cols*dst.channels(), ncoeffs = (int)ofsvec.size(); 760 761 for( int y = 0; y < dst.rows; y++ ) 762 { 763 const _Tp* sptr = src.ptr<_Tp>(y); 764 double* dptr = dst.ptr<double>(y); 765 766 for( int x = 0; x < width; x++ ) 767 { 768 double s = 0; 769 for( int i = 0; i < ncoeffs; i++ ) 770 s += sptr[x + ofs[i]]*coeff[i]; 771 dptr[x] = s; 772 } 773 } 774 } 775 776 777 void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel, 778 Point anchor, double delta, int borderType, const Scalar& _borderValue) 779 { 780 Mat src, _dst; 781 Scalar borderValue = _borderValue; 782 CV_Assert( kernel.type() == CV_32F || kernel.type() == CV_64F ); 783 if( anchor == Point(-1,-1) ) 784 anchor = Point(kernel.cols/2, kernel.rows/2); 785 if( borderType == BORDER_CONSTANT ) 786 borderValue = getMinVal(src.depth()); 787 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1, 788 anchor.x, kernel.cols - anchor.x - 1, 789 borderType, borderValue); 790 _dst.create( _src.size(), CV_MAKETYPE(CV_64F, src.channels()) ); 791 792 vector<int> ofs; 793 vector<double> coeff(kernel.rows*kernel.cols); 794 Mat cmat(kernel.rows, kernel.cols, CV_64F, &coeff[0]); 795 convert(kernel, cmat, cmat.type()); 796 797 int step = (int)(src.step/src.elemSize1()), cn = src.channels(); 798 for( int i = 0; i < kernel.rows; i++ ) 799 for( int j = 0; j < kernel.cols; j++ ) 800 ofs.push_back(i*step + j*cn); 801 802 switch( src.depth() ) 803 { 804 case CV_8U: 805 filter2D_<uchar>(src, _dst, ofs, coeff); 806 break; 807 case CV_8S: 808 filter2D_<schar>(src, _dst, ofs, coeff); 809 break; 810 case CV_16U: 811 filter2D_<ushort>(src, _dst, ofs, coeff); 812 break; 813 case CV_16S: 814 filter2D_<short>(src, _dst, ofs, coeff); 815 break; 816 case CV_32S: 817 filter2D_<int>(src, _dst, ofs, coeff); 818 break; 819 case CV_32F: 820 filter2D_<float>(src, _dst, ofs, coeff); 821 break; 822 case CV_64F: 823 filter2D_<double>(src, _dst, ofs, coeff); 824 break; 825 default: 826 CV_Assert(0); 827 } 828 829 convert(_dst, dst, ddepth, 1, delta); 830 } 831 832 833 static int borderInterpolate( int p, int len, int borderType ) 834 { 835 if( (unsigned)p < (unsigned)len ) 836 ; 837 else if( borderType == BORDER_REPLICATE ) 838 p = p < 0 ? 0 : len - 1; 839 else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 ) 840 { 841 int delta = borderType == BORDER_REFLECT_101; 842 if( len == 1 ) 843 return 0; 844 do 845 { 846 if( p < 0 ) 847 p = -p - 1 + delta; 848 else 849 p = len - 1 - (p - len) - delta; 850 } 851 while( (unsigned)p >= (unsigned)len ); 852 } 853 else if( borderType == BORDER_WRAP ) 854 { 855 if( p < 0 ) 856 p -= ((p-len+1)/len)*len; 857 if( p >= len ) 858 p %= len; 859 } 860 else if( borderType == BORDER_CONSTANT ) 861 p = -1; 862 else 863 CV_Error( Error::StsBadArg, "Unknown/unsupported border type" ); 864 return p; 865 } 866 867 868 void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right, 869 int borderType, const Scalar& borderValue) 870 { 871 dst.create(src.rows + top + bottom, src.cols + left + right, src.type()); 872 int i, j, k, esz = (int)src.elemSize(); 873 int width = src.cols*esz, width1 = dst.cols*esz; 874 875 if( borderType == BORDER_CONSTANT ) 876 { 877 vector<uchar> valvec((src.cols + left + right)*esz); 878 uchar* val = &valvec[0]; 879 scalarToRawData(borderValue, val, src.type(), (src.cols + left + right)*src.channels()); 880 881 left *= esz; 882 right *= esz; 883 for( i = 0; i < src.rows; i++ ) 884 { 885 const uchar* sptr = src.ptr(i); 886 uchar* dptr = dst.ptr(i + top) + left; 887 for( j = 0; j < left; j++ ) 888 dptr[j - left] = val[j]; 889 if( dptr != sptr ) 890 for( j = 0; j < width; j++ ) 891 dptr[j] = sptr[j]; 892 for( j = 0; j < right; j++ ) 893 dptr[j + width] = val[j]; 894 } 895 896 for( i = 0; i < top; i++ ) 897 { 898 uchar* dptr = dst.ptr(i); 899 for( j = 0; j < width1; j++ ) 900 dptr[j] = val[j]; 901 } 902 903 for( i = 0; i < bottom; i++ ) 904 { 905 uchar* dptr = dst.ptr(i + top + src.rows); 906 for( j = 0; j < width1; j++ ) 907 dptr[j] = val[j]; 908 } 909 } 910 else 911 { 912 vector<int> tabvec((left + right)*esz + 1); 913 int* ltab = &tabvec[0]; 914 int* rtab = &tabvec[left*esz]; 915 for( i = 0; i < left; i++ ) 916 { 917 j = borderInterpolate(i - left, src.cols, borderType)*esz; 918 for( k = 0; k < esz; k++ ) 919 ltab[i*esz + k] = j + k; 920 } 921 for( i = 0; i < right; i++ ) 922 { 923 j = borderInterpolate(src.cols + i, src.cols, borderType)*esz; 924 for( k = 0; k < esz; k++ ) 925 rtab[i*esz + k] = j + k; 926 } 927 928 left *= esz; 929 right *= esz; 930 for( i = 0; i < src.rows; i++ ) 931 { 932 const uchar* sptr = src.ptr(i); 933 uchar* dptr = dst.ptr(i + top); 934 935 for( j = 0; j < left; j++ ) 936 dptr[j] = sptr[ltab[j]]; 937 if( dptr + left != sptr ) 938 { 939 for( j = 0; j < width; j++ ) 940 dptr[j + left] = sptr[j]; 941 } 942 for( j = 0; j < right; j++ ) 943 dptr[j + left + width] = sptr[rtab[j]]; 944 } 945 946 for( i = 0; i < top; i++ ) 947 { 948 j = borderInterpolate(i - top, src.rows, borderType); 949 const uchar* sptr = dst.ptr(j + top); 950 uchar* dptr = dst.ptr(i); 951 952 for( k = 0; k < width1; k++ ) 953 dptr[k] = sptr[k]; 954 } 955 956 for( i = 0; i < bottom; i++ ) 957 { 958 j = borderInterpolate(i + src.rows, src.rows, borderType); 959 const uchar* sptr = dst.ptr(j + top); 960 uchar* dptr = dst.ptr(i + top + src.rows); 961 962 for( k = 0; k < width1; k++ ) 963 dptr[k] = sptr[k]; 964 } 965 } 966 } 967 968 969 template<typename _Tp> static void 970 minMaxLoc_(const _Tp* src, size_t total, size_t startidx, 971 double* _minval, double* _maxval, 972 size_t* _minpos, size_t* _maxpos, 973 const uchar* mask) 974 { 975 _Tp maxval = saturate_cast<_Tp>(*_maxval), minval = saturate_cast<_Tp>(*_minval); 976 size_t minpos = *_minpos, maxpos = *_maxpos; 977 978 if( !mask ) 979 { 980 for( size_t i = 0; i < total; i++ ) 981 { 982 _Tp val = src[i]; 983 if( minval > val ) 984 { 985 minval = val; 986 minpos = startidx + i; 987 } 988 if( maxval < val ) 989 { 990 maxval = val; 991 maxpos = startidx + i; 992 } 993 } 994 } 995 else 996 { 997 for( size_t i = 0; i < total; i++ ) 998 { 999 _Tp val = src[i]; 1000 if( minval > val && mask[i] ) 1001 { 1002 minval = val; 1003 minpos = startidx + i; 1004 } 1005 if( maxval < val && mask[i] ) 1006 { 1007 maxval = val; 1008 maxpos = startidx + i; 1009 } 1010 } 1011 } 1012 1013 *_maxval = maxval; 1014 *_minval = minval; 1015 *_maxpos = maxpos; 1016 *_minpos = minpos; 1017 } 1018 1019 1020 static void setpos( const Mat& mtx, vector<int>& pos, size_t idx ) 1021 { 1022 pos.resize(mtx.dims); 1023 if( idx > 0 ) 1024 { 1025 idx--; 1026 for( int i = mtx.dims-1; i >= 0; i-- ) 1027 { 1028 int sz = mtx.size[i]*(i == mtx.dims-1 ? mtx.channels() : 1); 1029 pos[i] = (int)(idx % sz); 1030 idx /= sz; 1031 } 1032 } 1033 else 1034 { 1035 for( int i = mtx.dims-1; i >= 0; i-- ) 1036 pos[i] = -1; 1037 } 1038 } 1039 1040 void minMaxLoc(const Mat& src, double* _minval, double* _maxval, 1041 vector<int>* _minloc, vector<int>* _maxloc, 1042 const Mat& mask) 1043 { 1044 CV_Assert( src.channels() == 1 ); 1045 const Mat *arrays[]={&src, &mask, 0}; 1046 Mat planes[2]; 1047 1048 NAryMatIterator it(arrays, planes); 1049 size_t startidx = 1, total = planes[0].total(); 1050 size_t i, nplanes = it.nplanes; 1051 int depth = src.depth(); 1052 double maxval = depth < CV_32F ? INT_MIN : depth == CV_32F ? -FLT_MAX : -DBL_MAX; 1053 double minval = depth < CV_32F ? INT_MAX : depth == CV_32F ? FLT_MAX : DBL_MAX; 1054 size_t maxidx = 0, minidx = 0; 1055 1056 for( i = 0; i < nplanes; i++, ++it, startidx += total ) 1057 { 1058 const uchar* sptr = planes[0].ptr(); 1059 const uchar* mptr = planes[1].ptr(); 1060 1061 switch( depth ) 1062 { 1063 case CV_8U: 1064 minMaxLoc_((const uchar*)sptr, total, startidx, 1065 &minval, &maxval, &minidx, &maxidx, mptr); 1066 break; 1067 case CV_8S: 1068 minMaxLoc_((const schar*)sptr, total, startidx, 1069 &minval, &maxval, &minidx, &maxidx, mptr); 1070 break; 1071 case CV_16U: 1072 minMaxLoc_((const ushort*)sptr, total, startidx, 1073 &minval, &maxval, &minidx, &maxidx, mptr); 1074 break; 1075 case CV_16S: 1076 minMaxLoc_((const short*)sptr, total, startidx, 1077 &minval, &maxval, &minidx, &maxidx, mptr); 1078 break; 1079 case CV_32S: 1080 minMaxLoc_((const int*)sptr, total, startidx, 1081 &minval, &maxval, &minidx, &maxidx, mptr); 1082 break; 1083 case CV_32F: 1084 minMaxLoc_((const float*)sptr, total, startidx, 1085 &minval, &maxval, &minidx, &maxidx, mptr); 1086 break; 1087 case CV_64F: 1088 minMaxLoc_((const double*)sptr, total, startidx, 1089 &minval, &maxval, &minidx, &maxidx, mptr); 1090 break; 1091 default: 1092 CV_Assert(0); 1093 } 1094 } 1095 1096 if( minidx == 0 ) 1097 minval = maxval = 0; 1098 1099 if( _maxval ) 1100 *_maxval = maxval; 1101 if( _minval ) 1102 *_minval = minval; 1103 if( _maxloc ) 1104 setpos( src, *_maxloc, maxidx ); 1105 if( _minloc ) 1106 setpos( src, *_minloc, minidx ); 1107 } 1108 1109 1110 static int 1111 normHamming(const uchar* src, size_t total, int cellSize) 1112 { 1113 int result = 0; 1114 int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1; 1115 CV_Assert( mask >= 0 ); 1116 1117 for( size_t i = 0; i < total; i++ ) 1118 { 1119 unsigned a = src[i]; 1120 for( ; a != 0; a >>= cellSize ) 1121 result += (a & mask) != 0; 1122 } 1123 return result; 1124 } 1125 1126 1127 template<typename _Tp> static double 1128 norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask) 1129 { 1130 size_t i; 1131 double result = startval; 1132 if( !mask ) 1133 total *= cn; 1134 1135 if( normType == NORM_INF ) 1136 { 1137 if( !mask ) 1138 for( i = 0; i < total; i++ ) 1139 result = std::max(result, (double)std::abs(0+src[i]));// trick with 0 used to quiet gcc warning 1140 else 1141 for( int c = 0; c < cn; c++ ) 1142 { 1143 for( i = 0; i < total; i++ ) 1144 if( mask[i] ) 1145 result = std::max(result, (double)std::abs(0+src[i*cn + c])); 1146 } 1147 } 1148 else if( normType == NORM_L1 ) 1149 { 1150 if( !mask ) 1151 for( i = 0; i < total; i++ ) 1152 result += std::abs(0+src[i]); 1153 else 1154 for( int c = 0; c < cn; c++ ) 1155 { 1156 for( i = 0; i < total; i++ ) 1157 if( mask[i] ) 1158 result += std::abs(0+src[i*cn + c]); 1159 } 1160 } 1161 else 1162 { 1163 if( !mask ) 1164 for( i = 0; i < total; i++ ) 1165 { 1166 double v = src[i]; 1167 result += v*v; 1168 } 1169 else 1170 for( int c = 0; c < cn; c++ ) 1171 { 1172 for( i = 0; i < total; i++ ) 1173 if( mask[i] ) 1174 { 1175 double v = src[i*cn + c]; 1176 result += v*v; 1177 } 1178 } 1179 } 1180 return result; 1181 } 1182 1183 1184 template<typename _Tp> static double 1185 norm_(const _Tp* src1, const _Tp* src2, size_t total, int cn, int normType, double startval, const uchar* mask) 1186 { 1187 size_t i; 1188 double result = startval; 1189 if( !mask ) 1190 total *= cn; 1191 1192 if( normType == NORM_INF ) 1193 { 1194 if( !mask ) 1195 for( i = 0; i < total; i++ ) 1196 result = std::max(result, (double)std::abs(src1[i] - src2[i])); 1197 else 1198 for( int c = 0; c < cn; c++ ) 1199 { 1200 for( i = 0; i < total; i++ ) 1201 if( mask[i] ) 1202 result = std::max(result, (double)std::abs(src1[i*cn + c] - src2[i*cn + c])); 1203 } 1204 } 1205 else if( normType == NORM_L1 ) 1206 { 1207 if( !mask ) 1208 for( i = 0; i < total; i++ ) 1209 result += std::abs(src1[i] - src2[i]); 1210 else 1211 for( int c = 0; c < cn; c++ ) 1212 { 1213 for( i = 0; i < total; i++ ) 1214 if( mask[i] ) 1215 result += std::abs(src1[i*cn + c] - src2[i*cn + c]); 1216 } 1217 } 1218 else 1219 { 1220 if( !mask ) 1221 for( i = 0; i < total; i++ ) 1222 { 1223 double v = src1[i] - src2[i]; 1224 result += v*v; 1225 } 1226 else 1227 for( int c = 0; c < cn; c++ ) 1228 { 1229 for( i = 0; i < total; i++ ) 1230 if( mask[i] ) 1231 { 1232 double v = src1[i*cn + c] - src2[i*cn + c]; 1233 result += v*v; 1234 } 1235 } 1236 } 1237 return result; 1238 } 1239 1240 1241 double norm(InputArray _src, int normType, InputArray _mask) 1242 { 1243 Mat src = _src.getMat(), mask = _mask.getMat(); 1244 if( normType == NORM_HAMMING || normType == NORM_HAMMING2 ) 1245 { 1246 if( !mask.empty() ) 1247 { 1248 Mat temp; 1249 bitwise_and(src, mask, temp); 1250 return cvtest::norm(temp, normType, Mat()); 1251 } 1252 1253 CV_Assert( src.depth() == CV_8U ); 1254 1255 const Mat *arrays[]={&src, 0}; 1256 Mat planes[1]; 1257 1258 NAryMatIterator it(arrays, planes); 1259 size_t total = planes[0].total(); 1260 size_t i, nplanes = it.nplanes; 1261 double result = 0; 1262 int cellSize = normType == NORM_HAMMING ? 1 : 2; 1263 1264 for( i = 0; i < nplanes; i++, ++it ) 1265 result += normHamming(planes[0].ptr(), total, cellSize); 1266 return result; 1267 } 1268 int normType0 = normType; 1269 normType = normType == NORM_L2SQR ? NORM_L2 : normType; 1270 1271 CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) ); 1272 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 ); 1273 1274 const Mat *arrays[]={&src, &mask, 0}; 1275 Mat planes[2]; 1276 1277 NAryMatIterator it(arrays, planes); 1278 size_t total = planes[0].total(); 1279 size_t i, nplanes = it.nplanes; 1280 int depth = src.depth(), cn = planes[0].channels(); 1281 double result = 0; 1282 1283 for( i = 0; i < nplanes; i++, ++it ) 1284 { 1285 const uchar* sptr = planes[0].ptr(); 1286 const uchar* mptr = planes[1].ptr(); 1287 1288 switch( depth ) 1289 { 1290 case CV_8U: 1291 result = norm_((const uchar*)sptr, total, cn, normType, result, mptr); 1292 break; 1293 case CV_8S: 1294 result = norm_((const schar*)sptr, total, cn, normType, result, mptr); 1295 break; 1296 case CV_16U: 1297 result = norm_((const ushort*)sptr, total, cn, normType, result, mptr); 1298 break; 1299 case CV_16S: 1300 result = norm_((const short*)sptr, total, cn, normType, result, mptr); 1301 break; 1302 case CV_32S: 1303 result = norm_((const int*)sptr, total, cn, normType, result, mptr); 1304 break; 1305 case CV_32F: 1306 result = norm_((const float*)sptr, total, cn, normType, result, mptr); 1307 break; 1308 case CV_64F: 1309 result = norm_((const double*)sptr, total, cn, normType, result, mptr); 1310 break; 1311 default: 1312 CV_Error(Error::StsUnsupportedFormat, ""); 1313 }; 1314 } 1315 if( normType0 == NORM_L2 ) 1316 result = sqrt(result); 1317 return result; 1318 } 1319 1320 1321 double norm(InputArray _src1, InputArray _src2, int normType, InputArray _mask) 1322 { 1323 Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat(); 1324 bool isRelative = (normType & NORM_RELATIVE) != 0; 1325 normType &= ~NORM_RELATIVE; 1326 1327 if( normType == NORM_HAMMING || normType == NORM_HAMMING2 ) 1328 { 1329 Mat temp; 1330 bitwise_xor(src1, src2, temp); 1331 if( !mask.empty() ) 1332 bitwise_and(temp, mask, temp); 1333 1334 CV_Assert( temp.depth() == CV_8U ); 1335 1336 const Mat *arrays[]={&temp, 0}; 1337 Mat planes[1]; 1338 1339 NAryMatIterator it(arrays, planes); 1340 size_t total = planes[0].total(); 1341 size_t i, nplanes = it.nplanes; 1342 double result = 0; 1343 int cellSize = normType == NORM_HAMMING ? 1 : 2; 1344 1345 for( i = 0; i < nplanes; i++, ++it ) 1346 result += normHamming(planes[0].ptr(), total, cellSize); 1347 return result; 1348 } 1349 int normType0 = normType; 1350 normType = normType == NORM_L2SQR ? NORM_L2 : normType; 1351 1352 CV_Assert( src1.type() == src2.type() && src1.size == src2.size ); 1353 CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) ); 1354 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 ); 1355 const Mat *arrays[]={&src1, &src2, &mask, 0}; 1356 Mat planes[3]; 1357 1358 NAryMatIterator it(arrays, planes); 1359 size_t total = planes[0].total(); 1360 size_t i, nplanes = it.nplanes; 1361 int depth = src1.depth(), cn = planes[0].channels(); 1362 double result = 0; 1363 1364 for( i = 0; i < nplanes; i++, ++it ) 1365 { 1366 const uchar* sptr1 = planes[0].ptr(); 1367 const uchar* sptr2 = planes[1].ptr(); 1368 const uchar* mptr = planes[2].ptr(); 1369 1370 switch( depth ) 1371 { 1372 case CV_8U: 1373 result = norm_((const uchar*)sptr1, (const uchar*)sptr2, total, cn, normType, result, mptr); 1374 break; 1375 case CV_8S: 1376 result = norm_((const schar*)sptr1, (const schar*)sptr2, total, cn, normType, result, mptr); 1377 break; 1378 case CV_16U: 1379 result = norm_((const ushort*)sptr1, (const ushort*)sptr2, total, cn, normType, result, mptr); 1380 break; 1381 case CV_16S: 1382 result = norm_((const short*)sptr1, (const short*)sptr2, total, cn, normType, result, mptr); 1383 break; 1384 case CV_32S: 1385 result = norm_((const int*)sptr1, (const int*)sptr2, total, cn, normType, result, mptr); 1386 break; 1387 case CV_32F: 1388 result = norm_((const float*)sptr1, (const float*)sptr2, total, cn, normType, result, mptr); 1389 break; 1390 case CV_64F: 1391 result = norm_((const double*)sptr1, (const double*)sptr2, total, cn, normType, result, mptr); 1392 break; 1393 default: 1394 CV_Error(Error::StsUnsupportedFormat, ""); 1395 }; 1396 } 1397 if( normType0 == NORM_L2 ) 1398 result = sqrt(result); 1399 return isRelative ? result / (cvtest::norm(src2, normType) + DBL_EPSILON) : result; 1400 } 1401 1402 double PSNR(InputArray _src1, InputArray _src2) 1403 { 1404 CV_Assert( _src1.depth() == CV_8U ); 1405 double diff = std::sqrt(cvtest::norm(_src1, _src2, NORM_L2SQR)/(_src1.total()*_src1.channels())); 1406 return 20*log10(255./(diff+DBL_EPSILON)); 1407 } 1408 1409 template<typename _Tp> static double 1410 crossCorr_(const _Tp* src1, const _Tp* src2, size_t total) 1411 { 1412 double result = 0; 1413 for( size_t i = 0; i < total; i++ ) 1414 result += (double)src1[i]*src2[i]; 1415 return result; 1416 } 1417 1418 double crossCorr(const Mat& src1, const Mat& src2) 1419 { 1420 CV_Assert( src1.size == src2.size && src1.type() == src2.type() ); 1421 const Mat *arrays[]={&src1, &src2, 0}; 1422 Mat planes[2]; 1423 1424 NAryMatIterator it(arrays, planes); 1425 size_t total = planes[0].total()*planes[0].channels(); 1426 size_t i, nplanes = it.nplanes; 1427 int depth = src1.depth(); 1428 double result = 0; 1429 1430 for( i = 0; i < nplanes; i++, ++it ) 1431 { 1432 const uchar* sptr1 = planes[0].ptr(); 1433 const uchar* sptr2 = planes[1].ptr(); 1434 1435 switch( depth ) 1436 { 1437 case CV_8U: 1438 result += crossCorr_((const uchar*)sptr1, (const uchar*)sptr2, total); 1439 break; 1440 case CV_8S: 1441 result += crossCorr_((const schar*)sptr1, (const schar*)sptr2, total); 1442 break; 1443 case CV_16U: 1444 result += crossCorr_((const ushort*)sptr1, (const ushort*)sptr2, total); 1445 break; 1446 case CV_16S: 1447 result += crossCorr_((const short*)sptr1, (const short*)sptr2, total); 1448 break; 1449 case CV_32S: 1450 result += crossCorr_((const int*)sptr1, (const int*)sptr2, total); 1451 break; 1452 case CV_32F: 1453 result += crossCorr_((const float*)sptr1, (const float*)sptr2, total); 1454 break; 1455 case CV_64F: 1456 result += crossCorr_((const double*)sptr1, (const double*)sptr2, total); 1457 break; 1458 default: 1459 CV_Error(Error::StsUnsupportedFormat, ""); 1460 }; 1461 } 1462 return result; 1463 } 1464 1465 1466 static void 1467 logicOp_(const uchar* src1, const uchar* src2, uchar* dst, size_t total, char c) 1468 { 1469 size_t i; 1470 if( c == '&' ) 1471 for( i = 0; i < total; i++ ) 1472 dst[i] = src1[i] & src2[i]; 1473 else if( c == '|' ) 1474 for( i = 0; i < total; i++ ) 1475 dst[i] = src1[i] | src2[i]; 1476 else 1477 for( i = 0; i < total; i++ ) 1478 dst[i] = src1[i] ^ src2[i]; 1479 } 1480 1481 static void 1482 logicOpS_(const uchar* src, const uchar* scalar, uchar* dst, size_t total, char c) 1483 { 1484 const size_t blockSize = 96; 1485 size_t i, j; 1486 if( c == '&' ) 1487 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize ) 1488 { 1489 size_t sz = MIN(total - i, blockSize); 1490 for( j = 0; j < sz; j++ ) 1491 dst[j] = src[j] & scalar[j]; 1492 } 1493 else if( c == '|' ) 1494 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize ) 1495 { 1496 size_t sz = MIN(total - i, blockSize); 1497 for( j = 0; j < sz; j++ ) 1498 dst[j] = src[j] | scalar[j]; 1499 } 1500 else if( c == '^' ) 1501 { 1502 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize ) 1503 { 1504 size_t sz = MIN(total - i, blockSize); 1505 for( j = 0; j < sz; j++ ) 1506 dst[j] = src[j] ^ scalar[j]; 1507 } 1508 } 1509 else 1510 for( i = 0; i < total; i++ ) 1511 dst[i] = ~src[i]; 1512 } 1513 1514 1515 void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op ) 1516 { 1517 CV_Assert( op == '&' || op == '|' || op == '^' ); 1518 CV_Assert( src1.type() == src2.type() && src1.size == src2.size ); 1519 dst.create( src1.dims, &src1.size[0], src1.type() ); 1520 const Mat *arrays[]={&src1, &src2, &dst, 0}; 1521 Mat planes[3]; 1522 1523 NAryMatIterator it(arrays, planes); 1524 size_t total = planes[0].total()*planes[0].elemSize(); 1525 size_t i, nplanes = it.nplanes; 1526 1527 for( i = 0; i < nplanes; i++, ++it ) 1528 { 1529 const uchar* sptr1 = planes[0].ptr(); 1530 const uchar* sptr2 = planes[1].ptr(); 1531 uchar* dptr = planes[2].ptr(); 1532 1533 logicOp_(sptr1, sptr2, dptr, total, op); 1534 } 1535 } 1536 1537 1538 void logicOp(const Mat& src, const Scalar& s, Mat& dst, char op) 1539 { 1540 CV_Assert( op == '&' || op == '|' || op == '^' || op == '~' ); 1541 dst.create( src.dims, &src.size[0], src.type() ); 1542 const Mat *arrays[]={&src, &dst, 0}; 1543 Mat planes[2]; 1544 1545 NAryMatIterator it(arrays, planes); 1546 size_t total = planes[0].total()*planes[0].elemSize(); 1547 size_t i, nplanes = it.nplanes; 1548 double buf[12]; 1549 scalarToRawData(s, buf, src.type(), (int)(96/planes[0].elemSize1())); 1550 1551 for( i = 0; i < nplanes; i++, ++it ) 1552 { 1553 const uchar* sptr = planes[0].ptr(); 1554 uchar* dptr = planes[1].ptr(); 1555 1556 logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op); 1557 } 1558 } 1559 1560 1561 template<typename _Tp> static void 1562 compare_(const _Tp* src1, const _Tp* src2, uchar* dst, size_t total, int cmpop) 1563 { 1564 size_t i; 1565 switch( cmpop ) 1566 { 1567 case CMP_LT: 1568 for( i = 0; i < total; i++ ) 1569 dst[i] = src1[i] < src2[i] ? 255 : 0; 1570 break; 1571 case CMP_LE: 1572 for( i = 0; i < total; i++ ) 1573 dst[i] = src1[i] <= src2[i] ? 255 : 0; 1574 break; 1575 case CMP_EQ: 1576 for( i = 0; i < total; i++ ) 1577 dst[i] = src1[i] == src2[i] ? 255 : 0; 1578 break; 1579 case CMP_NE: 1580 for( i = 0; i < total; i++ ) 1581 dst[i] = src1[i] != src2[i] ? 255 : 0; 1582 break; 1583 case CMP_GE: 1584 for( i = 0; i < total; i++ ) 1585 dst[i] = src1[i] >= src2[i] ? 255 : 0; 1586 break; 1587 case CMP_GT: 1588 for( i = 0; i < total; i++ ) 1589 dst[i] = src1[i] > src2[i] ? 255 : 0; 1590 break; 1591 default: 1592 CV_Error(Error::StsBadArg, "Unknown comparison operation"); 1593 } 1594 } 1595 1596 1597 template<typename _Tp, typename _WTp> static void 1598 compareS_(const _Tp* src1, _WTp value, uchar* dst, size_t total, int cmpop) 1599 { 1600 size_t i; 1601 switch( cmpop ) 1602 { 1603 case CMP_LT: 1604 for( i = 0; i < total; i++ ) 1605 dst[i] = src1[i] < value ? 255 : 0; 1606 break; 1607 case CMP_LE: 1608 for( i = 0; i < total; i++ ) 1609 dst[i] = src1[i] <= value ? 255 : 0; 1610 break; 1611 case CMP_EQ: 1612 for( i = 0; i < total; i++ ) 1613 dst[i] = src1[i] == value ? 255 : 0; 1614 break; 1615 case CMP_NE: 1616 for( i = 0; i < total; i++ ) 1617 dst[i] = src1[i] != value ? 255 : 0; 1618 break; 1619 case CMP_GE: 1620 for( i = 0; i < total; i++ ) 1621 dst[i] = src1[i] >= value ? 255 : 0; 1622 break; 1623 case CMP_GT: 1624 for( i = 0; i < total; i++ ) 1625 dst[i] = src1[i] > value ? 255 : 0; 1626 break; 1627 default: 1628 CV_Error(Error::StsBadArg, "Unknown comparison operation"); 1629 } 1630 } 1631 1632 1633 void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop) 1634 { 1635 CV_Assert( src1.type() == src2.type() && src1.channels() == 1 && src1.size == src2.size ); 1636 dst.create( src1.dims, &src1.size[0], CV_8U ); 1637 const Mat *arrays[]={&src1, &src2, &dst, 0}; 1638 Mat planes[3]; 1639 1640 NAryMatIterator it(arrays, planes); 1641 size_t total = planes[0].total(); 1642 size_t i, nplanes = it.nplanes; 1643 int depth = src1.depth(); 1644 1645 for( i = 0; i < nplanes; i++, ++it ) 1646 { 1647 const uchar* sptr1 = planes[0].ptr(); 1648 const uchar* sptr2 = planes[1].ptr(); 1649 uchar* dptr = planes[2].ptr(); 1650 1651 switch( depth ) 1652 { 1653 case CV_8U: 1654 compare_((const uchar*)sptr1, (const uchar*)sptr2, dptr, total, cmpop); 1655 break; 1656 case CV_8S: 1657 compare_((const schar*)sptr1, (const schar*)sptr2, dptr, total, cmpop); 1658 break; 1659 case CV_16U: 1660 compare_((const ushort*)sptr1, (const ushort*)sptr2, dptr, total, cmpop); 1661 break; 1662 case CV_16S: 1663 compare_((const short*)sptr1, (const short*)sptr2, dptr, total, cmpop); 1664 break; 1665 case CV_32S: 1666 compare_((const int*)sptr1, (const int*)sptr2, dptr, total, cmpop); 1667 break; 1668 case CV_32F: 1669 compare_((const float*)sptr1, (const float*)sptr2, dptr, total, cmpop); 1670 break; 1671 case CV_64F: 1672 compare_((const double*)sptr1, (const double*)sptr2, dptr, total, cmpop); 1673 break; 1674 default: 1675 CV_Error(Error::StsUnsupportedFormat, ""); 1676 } 1677 } 1678 } 1679 1680 void compare(const Mat& src, double value, Mat& dst, int cmpop) 1681 { 1682 CV_Assert( src.channels() == 1 ); 1683 dst.create( src.dims, &src.size[0], CV_8U ); 1684 const Mat *arrays[]={&src, &dst, 0}; 1685 Mat planes[2]; 1686 1687 NAryMatIterator it(arrays, planes); 1688 size_t total = planes[0].total(); 1689 size_t i, nplanes = it.nplanes; 1690 int depth = src.depth(); 1691 int ivalue = saturate_cast<int>(value); 1692 1693 for( i = 0; i < nplanes; i++, ++it ) 1694 { 1695 const uchar* sptr = planes[0].ptr(); 1696 uchar* dptr = planes[1].ptr(); 1697 1698 switch( depth ) 1699 { 1700 case CV_8U: 1701 compareS_((const uchar*)sptr, ivalue, dptr, total, cmpop); 1702 break; 1703 case CV_8S: 1704 compareS_((const schar*)sptr, ivalue, dptr, total, cmpop); 1705 break; 1706 case CV_16U: 1707 compareS_((const ushort*)sptr, ivalue, dptr, total, cmpop); 1708 break; 1709 case CV_16S: 1710 compareS_((const short*)sptr, ivalue, dptr, total, cmpop); 1711 break; 1712 case CV_32S: 1713 compareS_((const int*)sptr, ivalue, dptr, total, cmpop); 1714 break; 1715 case CV_32F: 1716 compareS_((const float*)sptr, value, dptr, total, cmpop); 1717 break; 1718 case CV_64F: 1719 compareS_((const double*)sptr, value, dptr, total, cmpop); 1720 break; 1721 default: 1722 CV_Error(Error::StsUnsupportedFormat, ""); 1723 } 1724 } 1725 } 1726 1727 1728 template<typename _Tp> double 1729 cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff, 1730 size_t startidx, size_t& idx) 1731 { 1732 size_t i; 1733 int realmaxdiff = 0; 1734 for( i = 0; i < total; i++ ) 1735 { 1736 int diff = std::abs(src1[i] - src2[i]); 1737 if( realmaxdiff < diff ) 1738 { 1739 realmaxdiff = diff; 1740 if( diff > imaxdiff && idx == 0 ) 1741 idx = i + startidx; 1742 } 1743 } 1744 return realmaxdiff; 1745 } 1746 1747 1748 template<> double cmpUlpsInt_<int>(const int* src1, const int* src2, 1749 size_t total, int imaxdiff, 1750 size_t startidx, size_t& idx) 1751 { 1752 size_t i; 1753 double realmaxdiff = 0; 1754 for( i = 0; i < total; i++ ) 1755 { 1756 double diff = fabs((double)src1[i] - (double)src2[i]); 1757 if( realmaxdiff < diff ) 1758 { 1759 realmaxdiff = diff; 1760 if( diff > imaxdiff && idx == 0 ) 1761 idx = i + startidx; 1762 } 1763 } 1764 return realmaxdiff; 1765 } 1766 1767 1768 static double 1769 cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx) 1770 { 1771 const int C = 0x7fffffff; 1772 int realmaxdiff = 0; 1773 size_t i; 1774 for( i = 0; i < total; i++ ) 1775 { 1776 int a = src1[i], b = src2[i]; 1777 if( a < 0 ) a ^= C; if( b < 0 ) b ^= C; 1778 int diff = std::abs(a - b); 1779 if( realmaxdiff < diff ) 1780 { 1781 realmaxdiff = diff; 1782 if( diff > imaxdiff && idx == 0 ) 1783 idx = i + startidx; 1784 } 1785 } 1786 return realmaxdiff; 1787 } 1788 1789 1790 static double 1791 cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx) 1792 { 1793 const int64 C = CV_BIG_INT(0x7fffffffffffffff); 1794 double realmaxdiff = 0; 1795 size_t i; 1796 for( i = 0; i < total; i++ ) 1797 { 1798 int64 a = src1[i], b = src2[i]; 1799 if( a < 0 ) a ^= C; if( b < 0 ) b ^= C; 1800 double diff = fabs((double)a - (double)b); 1801 if( realmaxdiff < diff ) 1802 { 1803 realmaxdiff = diff; 1804 if( diff > imaxdiff && idx == 0 ) 1805 idx = i + startidx; 1806 } 1807 } 1808 return realmaxdiff; 1809 } 1810 1811 bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdiff, vector<int>* loc) 1812 { 1813 CV_Assert( src1.type() == src2.type() && src1.size == src2.size ); 1814 const Mat *arrays[]={&src1, &src2, 0}; 1815 Mat planes[2]; 1816 NAryMatIterator it(arrays, planes); 1817 size_t total = planes[0].total()*planes[0].channels(); 1818 size_t i, nplanes = it.nplanes; 1819 int depth = src1.depth(); 1820 size_t startidx = 1, idx = 0; 1821 if(_realmaxdiff) 1822 *_realmaxdiff = 0; 1823 1824 for( i = 0; i < nplanes; i++, ++it, startidx += total ) 1825 { 1826 const uchar* sptr1 = planes[0].ptr(); 1827 const uchar* sptr2 = planes[1].ptr(); 1828 double realmaxdiff = 0; 1829 1830 switch( depth ) 1831 { 1832 case CV_8U: 1833 realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, imaxDiff, startidx, idx); 1834 break; 1835 case CV_8S: 1836 realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, imaxDiff, startidx, idx); 1837 break; 1838 case CV_16U: 1839 realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, imaxDiff, startidx, idx); 1840 break; 1841 case CV_16S: 1842 realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, imaxDiff, startidx, idx); 1843 break; 1844 case CV_32S: 1845 realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx); 1846 break; 1847 case CV_32F: 1848 realmaxdiff = cmpUlpsFlt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx); 1849 break; 1850 case CV_64F: 1851 realmaxdiff = cmpUlpsFlt_((const int64*)sptr1, (const int64*)sptr2, total, imaxDiff, startidx, idx); 1852 break; 1853 default: 1854 CV_Error(Error::StsUnsupportedFormat, ""); 1855 } 1856 1857 if(_realmaxdiff) 1858 *_realmaxdiff = std::max(*_realmaxdiff, realmaxdiff); 1859 } 1860 if(idx > 0 && loc) 1861 setpos(src1, *loc, idx); 1862 return idx == 0; 1863 } 1864 1865 1866 template<typename _Tp> static void 1867 checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_t& idx) 1868 { 1869 for( size_t i = 0; i < total; i++ ) 1870 { 1871 int val = a[i]; 1872 if( val < imin || val > imax ) 1873 { 1874 idx = i + startidx; 1875 break; 1876 } 1877 } 1878 } 1879 1880 1881 template<typename _Tp> static void 1882 checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx, size_t& idx) 1883 { 1884 for( size_t i = 0; i < total; i++ ) 1885 { 1886 double val = a[i]; 1887 if( cvIsNaN(val) || cvIsInf(val) || val < fmin || val > fmax ) 1888 { 1889 idx = i + startidx; 1890 break; 1891 } 1892 } 1893 } 1894 1895 1896 // checks that the array does not have NaNs and/or Infs and all the elements are 1897 // within [min_val,max_val). idx is the index of the first "bad" element. 1898 int check( const Mat& a, double fmin, double fmax, vector<int>* _idx ) 1899 { 1900 const Mat *arrays[]={&a, 0}; 1901 Mat plane; 1902 NAryMatIterator it(arrays, &plane); 1903 size_t total = plane.total()*plane.channels(); 1904 size_t i, nplanes = it.nplanes; 1905 int depth = a.depth(); 1906 size_t startidx = 1, idx = 0; 1907 int imin = 0, imax = 0; 1908 1909 if( depth <= CV_32S ) 1910 { 1911 imin = cvCeil(fmin); 1912 imax = cvFloor(fmax); 1913 } 1914 1915 for( i = 0; i < nplanes; i++, ++it, startidx += total ) 1916 { 1917 const uchar* aptr = plane.ptr(); 1918 1919 switch( depth ) 1920 { 1921 case CV_8U: 1922 checkInt_((const uchar*)aptr, total, imin, imax, startidx, idx); 1923 break; 1924 case CV_8S: 1925 checkInt_((const schar*)aptr, total, imin, imax, startidx, idx); 1926 break; 1927 case CV_16U: 1928 checkInt_((const ushort*)aptr, total, imin, imax, startidx, idx); 1929 break; 1930 case CV_16S: 1931 checkInt_((const short*)aptr, total, imin, imax, startidx, idx); 1932 break; 1933 case CV_32S: 1934 checkInt_((const int*)aptr, total, imin, imax, startidx, idx); 1935 break; 1936 case CV_32F: 1937 checkFlt_((const float*)aptr, total, fmin, fmax, startidx, idx); 1938 break; 1939 case CV_64F: 1940 checkFlt_((const double*)aptr, total, fmin, fmax, startidx, idx); 1941 break; 1942 default: 1943 CV_Error(Error::StsUnsupportedFormat, ""); 1944 } 1945 1946 if( idx != 0 ) 1947 break; 1948 } 1949 1950 if(idx != 0 && _idx) 1951 setpos(a, *_idx, idx); 1952 return idx == 0 ? 0 : -1; 1953 } 1954 1955 #define CMP_EPS_OK 0 1956 #define CMP_EPS_BIG_DIFF -1 1957 #define CMP_EPS_INVALID_TEST_DATA -2 // there is NaN or Inf value in test data 1958 #define CMP_EPS_INVALID_REF_DATA -3 // there is NaN or Inf value in reference data 1959 1960 // compares two arrays. max_diff is the maximum actual difference, 1961 // success_err_level is maximum allowed difference, idx is the index of the first 1962 // element for which difference is >success_err_level 1963 // (or index of element with the maximum difference) 1964 int cmpEps( const Mat& arr, const Mat& refarr, double* _realmaxdiff, 1965 double success_err_level, vector<int>* _idx, 1966 bool element_wise_relative_error ) 1967 { 1968 CV_Assert( arr.type() == refarr.type() && arr.size == refarr.size ); 1969 1970 int ilevel = refarr.depth() <= CV_32S ? cvFloor(success_err_level) : 0; 1971 int result = CMP_EPS_OK; 1972 1973 const Mat *arrays[]={&arr, &refarr, 0}; 1974 Mat planes[2]; 1975 NAryMatIterator it(arrays, planes); 1976 size_t total = planes[0].total()*planes[0].channels(), j = total; 1977 size_t i, nplanes = it.nplanes; 1978 int depth = arr.depth(); 1979 size_t startidx = 1, idx = 0; 1980 double realmaxdiff = 0, maxval = 0; 1981 1982 if(_realmaxdiff) 1983 *_realmaxdiff = 0; 1984 1985 if( refarr.depth() >= CV_32F && !element_wise_relative_error ) 1986 { 1987 maxval = cvtest::norm( refarr, NORM_INF ); 1988 maxval = MAX(maxval, 1.); 1989 } 1990 1991 for( i = 0; i < nplanes; i++, ++it, startidx += total ) 1992 { 1993 const uchar* sptr1 = planes[0].ptr(); 1994 const uchar* sptr2 = planes[1].ptr(); 1995 1996 switch( depth ) 1997 { 1998 case CV_8U: 1999 realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, ilevel, startidx, idx); 2000 break; 2001 case CV_8S: 2002 realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, ilevel, startidx, idx); 2003 break; 2004 case CV_16U: 2005 realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, ilevel, startidx, idx); 2006 break; 2007 case CV_16S: 2008 realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, ilevel, startidx, idx); 2009 break; 2010 case CV_32S: 2011 realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, ilevel, startidx, idx); 2012 break; 2013 case CV_32F: 2014 for( j = 0; j < total; j++ ) 2015 { 2016 double a_val = ((float*)sptr1)[j]; 2017 double b_val = ((float*)sptr2)[j]; 2018 double threshold; 2019 if( ((int*)sptr1)[j] == ((int*)sptr2)[j] ) 2020 continue; 2021 if( cvIsNaN(a_val) || cvIsInf(a_val) ) 2022 { 2023 result = CMP_EPS_INVALID_TEST_DATA; 2024 idx = startidx + j; 2025 break; 2026 } 2027 if( cvIsNaN(b_val) || cvIsInf(b_val) ) 2028 { 2029 result = CMP_EPS_INVALID_REF_DATA; 2030 idx = startidx + j; 2031 break; 2032 } 2033 a_val = fabs(a_val - b_val); 2034 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval; 2035 if( a_val > threshold*success_err_level ) 2036 { 2037 realmaxdiff = a_val/threshold; 2038 if( idx == 0 ) 2039 idx = startidx + j; 2040 break; 2041 } 2042 } 2043 break; 2044 case CV_64F: 2045 for( j = 0; j < total; j++ ) 2046 { 2047 double a_val = ((double*)sptr1)[j]; 2048 double b_val = ((double*)sptr2)[j]; 2049 double threshold; 2050 if( ((int64*)sptr1)[j] == ((int64*)sptr2)[j] ) 2051 continue; 2052 if( cvIsNaN(a_val) || cvIsInf(a_val) ) 2053 { 2054 result = CMP_EPS_INVALID_TEST_DATA; 2055 idx = startidx + j; 2056 break; 2057 } 2058 if( cvIsNaN(b_val) || cvIsInf(b_val) ) 2059 { 2060 result = CMP_EPS_INVALID_REF_DATA; 2061 idx = startidx + j; 2062 break; 2063 } 2064 a_val = fabs(a_val - b_val); 2065 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval; 2066 if( a_val > threshold*success_err_level ) 2067 { 2068 realmaxdiff = a_val/threshold; 2069 idx = startidx + j; 2070 break; 2071 } 2072 } 2073 break; 2074 default: 2075 assert(0); 2076 return CMP_EPS_BIG_DIFF; 2077 } 2078 if(_realmaxdiff) 2079 *_realmaxdiff = MAX(*_realmaxdiff, realmaxdiff); 2080 if( idx != 0 ) 2081 break; 2082 } 2083 2084 if( result == 0 && idx != 0 ) 2085 result = CMP_EPS_BIG_DIFF; 2086 2087 if( result < -1 && _realmaxdiff ) 2088 *_realmaxdiff = exp(1000.); 2089 if(idx > 0 && _idx) 2090 setpos(arr, *_idx, idx); 2091 2092 return result; 2093 } 2094 2095 2096 int cmpEps2( TS* ts, const Mat& a, const Mat& b, double success_err_level, 2097 bool element_wise_relative_error, const char* desc ) 2098 { 2099 char msg[100]; 2100 double diff = 0; 2101 vector<int> idx; 2102 int code = cmpEps( a, b, &diff, success_err_level, &idx, element_wise_relative_error ); 2103 2104 switch( code ) 2105 { 2106 case CMP_EPS_BIG_DIFF: 2107 sprintf( msg, "%s: Too big difference (=%g)", desc, diff ); 2108 code = TS::FAIL_BAD_ACCURACY; 2109 break; 2110 case CMP_EPS_INVALID_TEST_DATA: 2111 sprintf( msg, "%s: Invalid output", desc ); 2112 code = TS::FAIL_INVALID_OUTPUT; 2113 break; 2114 case CMP_EPS_INVALID_REF_DATA: 2115 sprintf( msg, "%s: Invalid reference output", desc ); 2116 code = TS::FAIL_INVALID_OUTPUT; 2117 break; 2118 default: 2119 ; 2120 } 2121 2122 if( code < 0 ) 2123 { 2124 if( a.total() == 1 ) 2125 { 2126 ts->printf( TS::LOG, "%s\n", msg ); 2127 } 2128 else if( a.dims == 2 && (a.rows == 1 || a.cols == 1) ) 2129 { 2130 ts->printf( TS::LOG, "%s at element %d\n", msg, idx[0] + idx[1] ); 2131 } 2132 else 2133 { 2134 string idxstr = vec2str(", ", &idx[0], idx.size()); 2135 ts->printf( TS::LOG, "%s at (%s)\n", msg, idxstr.c_str() ); 2136 } 2137 } 2138 2139 return code; 2140 } 2141 2142 2143 int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len, 2144 double eps, const char* param_name ) 2145 { 2146 Mat _val(1, len, CV_64F, (void*)val); 2147 Mat _refval(1, len, CV_64F, (void*)refval); 2148 2149 return cmpEps2( ts, _val, _refval, eps, true, param_name ); 2150 } 2151 2152 2153 template<typename _Tp> static void 2154 GEMM_(const _Tp* a_data0, int a_step, int a_delta, 2155 const _Tp* b_data0, int b_step, int b_delta, 2156 const _Tp* c_data0, int c_step, int c_delta, 2157 _Tp* d_data, int d_step, 2158 int d_rows, int d_cols, int a_cols, int cn, 2159 double alpha, double beta) 2160 { 2161 for( int i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step ) 2162 { 2163 for( int j = 0; j < d_cols; j++ ) 2164 { 2165 const _Tp* a_data = a_data0; 2166 const _Tp* b_data = b_data0 + j*b_delta; 2167 const _Tp* c_data = c_data0 + j*c_delta; 2168 2169 if( cn == 1 ) 2170 { 2171 double s = 0; 2172 for( int k = 0; k < a_cols; k++ ) 2173 { 2174 s += ((double)a_data[0])*b_data[0]; 2175 a_data += a_delta; 2176 b_data += b_step; 2177 } 2178 d_data[j] = (_Tp)(s*alpha + (c_data ? c_data[0]*beta : 0)); 2179 } 2180 else 2181 { 2182 double s_re = 0, s_im = 0; 2183 2184 for( int k = 0; k < a_cols; k++ ) 2185 { 2186 s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1]; 2187 s_im += ((double)a_data[0])*b_data[1] + ((double)a_data[1])*b_data[0]; 2188 a_data += a_delta; 2189 b_data += b_step; 2190 } 2191 2192 s_re *= alpha; 2193 s_im *= alpha; 2194 2195 if( c_data ) 2196 { 2197 s_re += c_data[0]*beta; 2198 s_im += c_data[1]*beta; 2199 } 2200 2201 d_data[j*2] = (_Tp)s_re; 2202 d_data[j*2+1] = (_Tp)s_im; 2203 } 2204 } 2205 } 2206 } 2207 2208 2209 void gemm( const Mat& _a, const Mat& _b, double alpha, 2210 const Mat& _c, double beta, Mat& d, int flags ) 2211 { 2212 Mat a = _a, b = _b, c = _c; 2213 2214 if( a.data == d.data ) 2215 a = a.clone(); 2216 2217 if( b.data == d.data ) 2218 b = b.clone(); 2219 2220 if( !c.empty() && c.data == d.data && (flags & cv::GEMM_3_T) ) 2221 c = c.clone(); 2222 2223 int a_rows = a.rows, a_cols = a.cols, b_rows = b.rows, b_cols = b.cols; 2224 int cn = a.channels(); 2225 int a_step = (int)a.step1(), a_delta = cn; 2226 int b_step = (int)b.step1(), b_delta = cn; 2227 int c_rows = 0, c_cols = 0, c_step = 0, c_delta = 0; 2228 2229 CV_Assert( a.type() == b.type() && a.dims == 2 && b.dims == 2 && cn <= 2 ); 2230 2231 if( flags & cv::GEMM_1_T ) 2232 { 2233 std::swap( a_rows, a_cols ); 2234 std::swap( a_step, a_delta ); 2235 } 2236 2237 if( flags & cv::GEMM_2_T ) 2238 { 2239 std::swap( b_rows, b_cols ); 2240 std::swap( b_step, b_delta ); 2241 } 2242 2243 if( !c.empty() ) 2244 { 2245 c_rows = c.rows; 2246 c_cols = c.cols; 2247 c_step = (int)c.step1(); 2248 c_delta = cn; 2249 2250 if( flags & cv::GEMM_3_T ) 2251 { 2252 std::swap( c_rows, c_cols ); 2253 std::swap( c_step, c_delta ); 2254 } 2255 2256 CV_Assert( c.dims == 2 && c.type() == a.type() && c_rows == a_rows && c_cols == b_cols ); 2257 } 2258 2259 d.create(a_rows, b_cols, a.type()); 2260 2261 if( a.depth() == CV_32F ) 2262 GEMM_(a.ptr<float>(), a_step, a_delta, b.ptr<float>(), b_step, b_delta, 2263 !c.empty() ? c.ptr<float>() : 0, c_step, c_delta, d.ptr<float>(), 2264 (int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta ); 2265 else 2266 GEMM_(a.ptr<double>(), a_step, a_delta, b.ptr<double>(), b_step, b_delta, 2267 !c.empty() ? c.ptr<double>() : 0, c_step, c_delta, d.ptr<double>(), 2268 (int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta ); 2269 } 2270 2271 2272 template<typename _Tp> static void 2273 transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const double* mat) 2274 { 2275 for( size_t i = 0; i < total; i++, sptr += scn, dptr += dcn ) 2276 { 2277 for( int j = 0; j < dcn; j++ ) 2278 { 2279 double s = mat[j*(scn + 1) + scn]; 2280 for( int k = 0; k < scn; k++ ) 2281 s += mat[j*(scn + 1) + k]*sptr[k]; 2282 dptr[j] = saturate_cast<_Tp>(s); 2283 } 2284 } 2285 } 2286 2287 2288 void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift ) 2289 { 2290 double mat[20]; 2291 2292 int scn = src.channels(); 2293 int dcn = dst.channels(); 2294 int depth = src.depth(); 2295 int mattype = transmat.depth(); 2296 Mat shift = _shift.reshape(1, 0); 2297 bool haveShift = !shift.empty(); 2298 2299 CV_Assert( scn <= 4 && dcn <= 4 && 2300 (mattype == CV_32F || mattype == CV_64F) && 2301 (!haveShift || (shift.type() == mattype && (shift.rows == 1 || shift.cols == 1))) ); 2302 2303 // prepare cn x (cn + 1) transform matrix 2304 if( mattype == CV_32F ) 2305 { 2306 for( int i = 0; i < transmat.rows; i++ ) 2307 { 2308 mat[i*(scn+1)+scn] = 0.; 2309 for( int j = 0; j < transmat.cols; j++ ) 2310 mat[i*(scn+1)+j] = transmat.at<float>(i,j); 2311 if( haveShift ) 2312 mat[i*(scn+1)+scn] = shift.at<float>(i); 2313 } 2314 } 2315 else 2316 { 2317 for( int i = 0; i < transmat.rows; i++ ) 2318 { 2319 mat[i*(scn+1)+scn] = 0.; 2320 for( int j = 0; j < transmat.cols; j++ ) 2321 mat[i*(scn+1)+j] = transmat.at<double>(i,j); 2322 if( haveShift ) 2323 mat[i*(scn+1)+scn] = shift.at<double>(i); 2324 } 2325 } 2326 2327 const Mat *arrays[]={&src, &dst, 0}; 2328 Mat planes[2]; 2329 NAryMatIterator it(arrays, planes); 2330 size_t total = planes[0].total(); 2331 size_t i, nplanes = it.nplanes; 2332 2333 for( i = 0; i < nplanes; i++, ++it ) 2334 { 2335 const uchar* sptr = planes[0].ptr(); 2336 uchar* dptr = planes[1].ptr(); 2337 2338 switch( depth ) 2339 { 2340 case CV_8U: 2341 transform_((const uchar*)sptr, (uchar*)dptr, total, scn, dcn, mat); 2342 break; 2343 case CV_8S: 2344 transform_((const schar*)sptr, (schar*)dptr, total, scn, dcn, mat); 2345 break; 2346 case CV_16U: 2347 transform_((const ushort*)sptr, (ushort*)dptr, total, scn, dcn, mat); 2348 break; 2349 case CV_16S: 2350 transform_((const short*)sptr, (short*)dptr, total, scn, dcn, mat); 2351 break; 2352 case CV_32S: 2353 transform_((const int*)sptr, (int*)dptr, total, scn, dcn, mat); 2354 break; 2355 case CV_32F: 2356 transform_((const float*)sptr, (float*)dptr, total, scn, dcn, mat); 2357 break; 2358 case CV_64F: 2359 transform_((const double*)sptr, (double*)dptr, total, scn, dcn, mat); 2360 break; 2361 default: 2362 CV_Error(Error::StsUnsupportedFormat, ""); 2363 } 2364 } 2365 } 2366 2367 template<typename _Tp> static void 2368 minmax_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, char op) 2369 { 2370 if( op == 'M' ) 2371 for( size_t i = 0; i < total; i++ ) 2372 dst[i] = std::max(src1[i], src2[i]); 2373 else 2374 for( size_t i = 0; i < total; i++ ) 2375 dst[i] = std::min(src1[i], src2[i]); 2376 } 2377 2378 static void minmax(const Mat& src1, const Mat& src2, Mat& dst, char op) 2379 { 2380 dst.create(src1.dims, src1.size, src1.type()); 2381 CV_Assert( src1.type() == src2.type() && src1.size == src2.size ); 2382 const Mat *arrays[]={&src1, &src2, &dst, 0}; 2383 Mat planes[3]; 2384 2385 NAryMatIterator it(arrays, planes); 2386 size_t total = planes[0].total()*planes[0].channels(); 2387 size_t i, nplanes = it.nplanes, depth = src1.depth(); 2388 2389 for( i = 0; i < nplanes; i++, ++it ) 2390 { 2391 const uchar* sptr1 = planes[0].ptr(); 2392 const uchar* sptr2 = planes[1].ptr(); 2393 uchar* dptr = planes[2].ptr(); 2394 2395 switch( depth ) 2396 { 2397 case CV_8U: 2398 minmax_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, op); 2399 break; 2400 case CV_8S: 2401 minmax_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, op); 2402 break; 2403 case CV_16U: 2404 minmax_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, op); 2405 break; 2406 case CV_16S: 2407 minmax_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, op); 2408 break; 2409 case CV_32S: 2410 minmax_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, op); 2411 break; 2412 case CV_32F: 2413 minmax_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, op); 2414 break; 2415 case CV_64F: 2416 minmax_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, op); 2417 break; 2418 default: 2419 CV_Error(Error::StsUnsupportedFormat, ""); 2420 } 2421 } 2422 } 2423 2424 2425 void min(const Mat& src1, const Mat& src2, Mat& dst) 2426 { 2427 minmax( src1, src2, dst, 'm' ); 2428 } 2429 2430 void max(const Mat& src1, const Mat& src2, Mat& dst) 2431 { 2432 minmax( src1, src2, dst, 'M' ); 2433 } 2434 2435 2436 template<typename _Tp> static void 2437 minmax_(const _Tp* src1, _Tp val, _Tp* dst, size_t total, char op) 2438 { 2439 if( op == 'M' ) 2440 for( size_t i = 0; i < total; i++ ) 2441 dst[i] = std::max(src1[i], val); 2442 else 2443 for( size_t i = 0; i < total; i++ ) 2444 dst[i] = std::min(src1[i], val); 2445 } 2446 2447 static void minmax(const Mat& src1, double val, Mat& dst, char op) 2448 { 2449 dst.create(src1.dims, src1.size, src1.type()); 2450 const Mat *arrays[]={&src1, &dst, 0}; 2451 Mat planes[2]; 2452 2453 NAryMatIterator it(arrays, planes); 2454 size_t total = planes[0].total()*planes[0].channels(); 2455 size_t i, nplanes = it.nplanes, depth = src1.depth(); 2456 int ival = saturate_cast<int>(val); 2457 2458 for( i = 0; i < nplanes; i++, ++it ) 2459 { 2460 const uchar* sptr1 = planes[0].ptr(); 2461 uchar* dptr = planes[1].ptr(); 2462 2463 switch( depth ) 2464 { 2465 case CV_8U: 2466 minmax_((const uchar*)sptr1, saturate_cast<uchar>(ival), (uchar*)dptr, total, op); 2467 break; 2468 case CV_8S: 2469 minmax_((const schar*)sptr1, saturate_cast<schar>(ival), (schar*)dptr, total, op); 2470 break; 2471 case CV_16U: 2472 minmax_((const ushort*)sptr1, saturate_cast<ushort>(ival), (ushort*)dptr, total, op); 2473 break; 2474 case CV_16S: 2475 minmax_((const short*)sptr1, saturate_cast<short>(ival), (short*)dptr, total, op); 2476 break; 2477 case CV_32S: 2478 minmax_((const int*)sptr1, saturate_cast<int>(ival), (int*)dptr, total, op); 2479 break; 2480 case CV_32F: 2481 minmax_((const float*)sptr1, saturate_cast<float>(val), (float*)dptr, total, op); 2482 break; 2483 case CV_64F: 2484 minmax_((const double*)sptr1, saturate_cast<double>(val), (double*)dptr, total, op); 2485 break; 2486 default: 2487 CV_Error(Error::StsUnsupportedFormat, ""); 2488 } 2489 } 2490 } 2491 2492 2493 void min(const Mat& src1, double val, Mat& dst) 2494 { 2495 minmax( src1, val, dst, 'm' ); 2496 } 2497 2498 void max(const Mat& src1, double val, Mat& dst) 2499 { 2500 minmax( src1, val, dst, 'M' ); 2501 } 2502 2503 2504 template<typename _Tp> static void 2505 muldiv_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, double scale, char op) 2506 { 2507 if( op == '*' ) 2508 for( size_t i = 0; i < total; i++ ) 2509 dst[i] = saturate_cast<_Tp>((scale*src1[i])*src2[i]); 2510 else if( src1 ) 2511 for( size_t i = 0; i < total; i++ ) 2512 dst[i] = src2[i] ? saturate_cast<_Tp>((scale*src1[i])/src2[i]) : 0; 2513 else 2514 for( size_t i = 0; i < total; i++ ) 2515 dst[i] = src2[i] ? saturate_cast<_Tp>(scale/src2[i]) : 0; 2516 } 2517 2518 static void muldiv(const Mat& src1, const Mat& src2, Mat& dst, double scale, char op) 2519 { 2520 dst.create(src2.dims, src2.size, src2.type()); 2521 CV_Assert( src1.empty() || (src1.type() == src2.type() && src1.size == src2.size) ); 2522 const Mat *arrays[]={&src1, &src2, &dst, 0}; 2523 Mat planes[3]; 2524 2525 NAryMatIterator it(arrays, planes); 2526 size_t total = planes[1].total()*planes[1].channels(); 2527 size_t i, nplanes = it.nplanes, depth = src2.depth(); 2528 2529 for( i = 0; i < nplanes; i++, ++it ) 2530 { 2531 const uchar* sptr1 = planes[0].ptr(); 2532 const uchar* sptr2 = planes[1].ptr(); 2533 uchar* dptr = planes[2].ptr(); 2534 2535 switch( depth ) 2536 { 2537 case CV_8U: 2538 muldiv_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, scale, op); 2539 break; 2540 case CV_8S: 2541 muldiv_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, scale, op); 2542 break; 2543 case CV_16U: 2544 muldiv_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, scale, op); 2545 break; 2546 case CV_16S: 2547 muldiv_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, scale, op); 2548 break; 2549 case CV_32S: 2550 muldiv_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, scale, op); 2551 break; 2552 case CV_32F: 2553 muldiv_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, scale, op); 2554 break; 2555 case CV_64F: 2556 muldiv_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, scale, op); 2557 break; 2558 default: 2559 CV_Error(Error::StsUnsupportedFormat, ""); 2560 } 2561 } 2562 } 2563 2564 2565 void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale) 2566 { 2567 muldiv( src1, src2, dst, scale, '*' ); 2568 } 2569 2570 void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale) 2571 { 2572 muldiv( src1, src2, dst, scale, '/' ); 2573 } 2574 2575 2576 template<typename _Tp> static void 2577 mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int& nz) 2578 { 2579 if( !mask ) 2580 { 2581 nz += (int)total; 2582 total *= cn; 2583 for( size_t i = 0; i < total; i += cn ) 2584 { 2585 for( int c = 0; c < cn; c++ ) 2586 sum[c] += src[i + c]; 2587 } 2588 } 2589 else 2590 { 2591 for( size_t i = 0; i < total; i++ ) 2592 if( mask[i] ) 2593 { 2594 nz++; 2595 for( int c = 0; c < cn; c++ ) 2596 sum[c] += src[i*cn + c]; 2597 } 2598 } 2599 } 2600 2601 Scalar mean(const Mat& src, const Mat& mask) 2602 { 2603 CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size == src.size)); 2604 Scalar sum; 2605 int nz = 0; 2606 2607 const Mat *arrays[]={&src, &mask, 0}; 2608 Mat planes[2]; 2609 2610 NAryMatIterator it(arrays, planes); 2611 size_t total = planes[0].total(); 2612 size_t i, nplanes = it.nplanes; 2613 int depth = src.depth(), cn = src.channels(); 2614 2615 for( i = 0; i < nplanes; i++, ++it ) 2616 { 2617 const uchar* sptr = planes[0].ptr(); 2618 const uchar* mptr = planes[1].ptr(); 2619 2620 switch( depth ) 2621 { 2622 case CV_8U: 2623 mean_((const uchar*)sptr, mptr, total, cn, sum, nz); 2624 break; 2625 case CV_8S: 2626 mean_((const schar*)sptr, mptr, total, cn, sum, nz); 2627 break; 2628 case CV_16U: 2629 mean_((const ushort*)sptr, mptr, total, cn, sum, nz); 2630 break; 2631 case CV_16S: 2632 mean_((const short*)sptr, mptr, total, cn, sum, nz); 2633 break; 2634 case CV_32S: 2635 mean_((const int*)sptr, mptr, total, cn, sum, nz); 2636 break; 2637 case CV_32F: 2638 mean_((const float*)sptr, mptr, total, cn, sum, nz); 2639 break; 2640 case CV_64F: 2641 mean_((const double*)sptr, mptr, total, cn, sum, nz); 2642 break; 2643 default: 2644 CV_Error(Error::StsUnsupportedFormat, ""); 2645 } 2646 } 2647 2648 return sum * (1./std::max(nz, 1)); 2649 } 2650 2651 2652 void patchZeros( Mat& mat, double level ) 2653 { 2654 int j, ncols = mat.cols * mat.channels(); 2655 int depth = mat.depth(); 2656 CV_Assert( depth == CV_32F || depth == CV_64F ); 2657 2658 for( int i = 0; i < mat.rows; i++ ) 2659 { 2660 if( depth == CV_32F ) 2661 { 2662 float* data = mat.ptr<float>(i); 2663 for( j = 0; j < ncols; j++ ) 2664 if( fabs(data[j]) < level ) 2665 data[j] += 1; 2666 } 2667 else 2668 { 2669 double* data = mat.ptr<double>(i); 2670 for( j = 0; j < ncols; j++ ) 2671 if( fabs(data[j]) < level ) 2672 data[j] += 1; 2673 } 2674 } 2675 } 2676 2677 2678 static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<int>& kernel ) 2679 { 2680 int i, j, oldval, newval; 2681 kernel.resize(size + 1); 2682 2683 if( _aperture_size < 0 ) 2684 { 2685 static const int scharr[] = { 3, 10, 3, -1, 0, 1 }; 2686 assert( size == 3 ); 2687 for( i = 0; i < size; i++ ) 2688 kernel[i] = scharr[order*3 + i]; 2689 return; 2690 } 2691 2692 for( i = 1; i <= size; i++ ) 2693 kernel[i] = 0; 2694 kernel[0] = 1; 2695 2696 for( i = 0; i < size - order - 1; i++ ) 2697 { 2698 oldval = kernel[0]; 2699 for( j = 1; j <= size; j++ ) 2700 { 2701 newval = kernel[j] + kernel[j-1]; 2702 kernel[j-1] = oldval; 2703 oldval = newval; 2704 } 2705 } 2706 2707 for( i = 0; i < order; i++ ) 2708 { 2709 oldval = -kernel[0]; 2710 for( j = 1; j <= size; j++ ) 2711 { 2712 newval = kernel[j-1] - kernel[j]; 2713 kernel[j-1] = oldval; 2714 oldval = newval; 2715 } 2716 } 2717 } 2718 2719 2720 Mat calcSobelKernel2D( int dx, int dy, int _aperture_size, int origin ) 2721 { 2722 CV_Assert( (_aperture_size == -1 || (_aperture_size >= 1 && _aperture_size % 2 == 1)) && 2723 dx >= 0 && dy >= 0 && dx + dy <= 3 ); 2724 Size ksize = _aperture_size == -1 ? Size(3,3) : _aperture_size > 1 ? 2725 Size(_aperture_size, _aperture_size) : dx > 0 ? Size(3, 1) : Size(1, 3); 2726 2727 Mat kernel(ksize, CV_32F); 2728 vector<int> kx, ky; 2729 2730 calcSobelKernel1D( dx, _aperture_size, ksize.width, kx ); 2731 calcSobelKernel1D( dy, _aperture_size, ksize.height, ky ); 2732 2733 for( int i = 0; i < kernel.rows; i++ ) 2734 { 2735 float ay = (float)ky[i]*(origin && (dy & 1) ? -1 : 1); 2736 for( int j = 0; j < kernel.cols; j++ ) 2737 kernel.at<float>(i, j) = kx[j]*ay; 2738 } 2739 2740 return kernel; 2741 } 2742 2743 2744 Mat calcLaplaceKernel2D( int aperture_size ) 2745 { 2746 int ksize = aperture_size == 1 ? 3 : aperture_size; 2747 Mat kernel(ksize, ksize, CV_32F); 2748 2749 vector<int> kx, ky; 2750 2751 calcSobelKernel1D( 2, aperture_size, ksize, kx ); 2752 if( aperture_size > 1 ) 2753 calcSobelKernel1D( 0, aperture_size, ksize, ky ); 2754 else 2755 { 2756 ky.resize(3); 2757 ky[0] = ky[2] = 0; ky[1] = 1; 2758 } 2759 2760 for( int i = 0; i < ksize; i++ ) 2761 for( int j = 0; j < ksize; j++ ) 2762 kernel.at<float>(i, j) = (float)(kx[j]*ky[i] + kx[i]*ky[j]); 2763 2764 return kernel; 2765 } 2766 2767 2768 void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat& _mapy ) 2769 { 2770 _mapx.create(sz, CV_32F); 2771 _mapy.create(sz, CV_32F); 2772 2773 double a[9], k[5]={0,0,0,0,0}; 2774 Mat _a(3, 3, CV_64F, a); 2775 Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k); 2776 double fx, fy, cx, cy, ifx, ify, cxn, cyn; 2777 2778 _a0.convertTo(_a, CV_64F); 2779 _k0.convertTo(_k, CV_64F); 2780 fx = a[0]; fy = a[4]; cx = a[2]; cy = a[5]; 2781 ifx = 1./fx; ify = 1./fy; 2782 cxn = cx; 2783 cyn = cy; 2784 2785 for( int v = 0; v < sz.height; v++ ) 2786 { 2787 for( int u = 0; u < sz.width; u++ ) 2788 { 2789 double x = (u - cxn)*ifx; 2790 double y = (v - cyn)*ify; 2791 double x2 = x*x, y2 = y*y; 2792 double r2 = x2 + y2; 2793 double cdist = 1 + (k[0] + (k[1] + k[4]*r2)*r2)*r2; 2794 double x1 = x*cdist + k[2]*2*x*y + k[3]*(r2 + 2*x2); 2795 double y1 = y*cdist + k[3]*2*x*y + k[2]*(r2 + 2*y2); 2796 2797 _mapy.at<float>(v, u) = (float)(y1*fy + cy); 2798 _mapx.at<float>(v, u) = (float)(x1*fx + cx); 2799 } 2800 } 2801 } 2802 2803 2804 std::ostream& operator << (std::ostream& out, const MatInfo& m) 2805 { 2806 if( !m.m || m.m->empty() ) 2807 out << "<Empty>"; 2808 else 2809 { 2810 static const char* depthstr[] = {"8u", "8s", "16u", "16s", "32s", "32f", "64f", "?"}; 2811 out << depthstr[m.m->depth()] << "C" << m.m->channels() << " " << m.m->dims << "-dim ("; 2812 for( int i = 0; i < m.m->dims; i++ ) 2813 out << m.m->size[i] << (i < m.m->dims-1 ? " x " : ")"); 2814 } 2815 return out; 2816 } 2817 2818 2819 static Mat getSubArray(const Mat& m, int border, vector<int>& ofs0, vector<int>& ofs) 2820 { 2821 ofs.resize(ofs0.size()); 2822 if( border < 0 ) 2823 { 2824 std::copy(ofs0.begin(), ofs0.end(), ofs.begin()); 2825 return m; 2826 } 2827 int i, d = m.dims; 2828 CV_Assert(d == (int)ofs.size()); 2829 vector<Range> r(d); 2830 for( i = 0; i < d; i++ ) 2831 { 2832 r[i].start = std::max(0, ofs0[i] - border); 2833 r[i].end = std::min(ofs0[i] + 1 + border, m.size[i]); 2834 ofs[i] = std::min(ofs0[i], border); 2835 } 2836 return m(&r[0]); 2837 } 2838 2839 template<typename _Tp, typename _WTp> static void 2840 writeElems(std::ostream& out, const void* data, int nelems, int starpos) 2841 { 2842 for(int i = 0; i < nelems; i++) 2843 { 2844 if( i == starpos ) 2845 out << "*"; 2846 out << (_WTp)((_Tp*)data)[i]; 2847 if( i == starpos ) 2848 out << "*"; 2849 out << (i+1 < nelems ? ", " : ""); 2850 } 2851 } 2852 2853 2854 static void writeElems(std::ostream& out, const void* data, int nelems, int depth, int starpos) 2855 { 2856 if(depth == CV_8U) 2857 writeElems<uchar, int>(out, data, nelems, starpos); 2858 else if(depth == CV_8S) 2859 writeElems<schar, int>(out, data, nelems, starpos); 2860 else if(depth == CV_16U) 2861 writeElems<ushort, int>(out, data, nelems, starpos); 2862 else if(depth == CV_16S) 2863 writeElems<short, int>(out, data, nelems, starpos); 2864 else if(depth == CV_32S) 2865 writeElems<int, int>(out, data, nelems, starpos); 2866 else if(depth == CV_32F) 2867 { 2868 std::streamsize pp = out.precision(); 2869 out.precision(8); 2870 writeElems<float, float>(out, data, nelems, starpos); 2871 out.precision(pp); 2872 } 2873 else if(depth == CV_64F) 2874 { 2875 std::streamsize pp = out.precision(); 2876 out.precision(16); 2877 writeElems<double, double>(out, data, nelems, starpos); 2878 out.precision(pp); 2879 } 2880 else 2881 CV_Error(Error::StsUnsupportedFormat, ""); 2882 } 2883 2884 2885 struct MatPart 2886 { 2887 MatPart(const Mat& _m, const vector<int>* _loc) 2888 : m(&_m), loc(_loc) {} 2889 const Mat* m; 2890 const vector<int>* loc; 2891 }; 2892 2893 static std::ostream& operator << (std::ostream& out, const MatPart& m) 2894 { 2895 CV_Assert( !m.loc || ((int)m.loc->size() == m.m->dims && m.m->dims <= 2) ); 2896 if( !m.loc ) 2897 out << *m.m; 2898 else 2899 { 2900 int i, depth = m.m->depth(), cn = m.m->channels(), width = m.m->cols*cn; 2901 for( i = 0; i < m.m->rows; i++ ) 2902 { 2903 writeElems(out, m.m->ptr(i), width, depth, i == (*m.loc)[0] ? (*m.loc)[1] : -1); 2904 out << (i < m.m->rows-1 ? ";\n" : ""); 2905 } 2906 } 2907 return out; 2908 } 2909 2910 MatComparator::MatComparator(double _maxdiff, int _context) 2911 : maxdiff(_maxdiff), realmaxdiff(DBL_MAX), context(_context) {} 2912 2913 ::testing::AssertionResult 2914 MatComparator::operator()(const char* expr1, const char* expr2, 2915 const Mat& m1, const Mat& m2) 2916 { 2917 if( m1.type() != m2.type() || m1.size != m2.size ) 2918 return ::testing::AssertionFailure() 2919 << "The reference and the actual output arrays have different type or size:\n" 2920 << expr1 << " ~ " << MatInfo(m1) << "\n" 2921 << expr2 << " ~ " << MatInfo(m2) << "\n"; 2922 2923 //bool ok = cvtest::cmpUlps(m1, m2, maxdiff, &realmaxdiff, &loc0); 2924 int code = cmpEps( m1, m2, &realmaxdiff, maxdiff, &loc0, true); 2925 2926 if(code >= 0) 2927 return ::testing::AssertionSuccess(); 2928 2929 Mat m[] = {m1.reshape(1,0), m2.reshape(1,0)}; 2930 int dims = m[0].dims; 2931 vector<int> loc; 2932 int border = dims <= 2 ? context : 0; 2933 2934 Mat m1part, m2part; 2935 if( border == 0 ) 2936 { 2937 loc = loc0; 2938 m1part = Mat(1, 1, m[0].depth(), m[0].ptr(&loc[0])); 2939 m2part = Mat(1, 1, m[1].depth(), m[1].ptr(&loc[0])); 2940 } 2941 else 2942 { 2943 m1part = getSubArray(m[0], border, loc0, loc); 2944 m2part = getSubArray(m[1], border, loc0, loc); 2945 } 2946 2947 return ::testing::AssertionFailure() 2948 << "too big relative difference (" << realmaxdiff << " > " 2949 << maxdiff << ") between " 2950 << MatInfo(m1) << " '" << expr1 << "' and '" << expr2 << "' at " << Mat(loc0) << ".\n\n" 2951 << "'" << expr1 << "': " << MatPart(m1part, border > 0 ? &loc : 0) << ".\n\n" 2952 << "'" << expr2 << "': " << MatPart(m2part, border > 0 ? &loc : 0) << ".\n"; 2953 } 2954 2955 void printVersionInfo(bool useStdOut) 2956 { 2957 ::testing::Test::RecordProperty("cv_version", CV_VERSION); 2958 if(useStdOut) std::cout << "OpenCV version: " << CV_VERSION << std::endl; 2959 2960 std::string buildInfo( cv::getBuildInformation() ); 2961 2962 size_t pos1 = buildInfo.find("Version control"); 2963 size_t pos2 = buildInfo.find('\n', pos1); 2964 if(pos1 != std::string::npos && pos2 != std::string::npos) 2965 { 2966 size_t value_start = buildInfo.rfind(' ', pos2) + 1; 2967 std::string ver( buildInfo.substr(value_start, pos2 - value_start) ); 2968 ::testing::Test::RecordProperty("cv_vcs_version", ver); 2969 if (useStdOut) std::cout << "OpenCV VCS version: " << ver << std::endl; 2970 } 2971 2972 pos1 = buildInfo.find("inner version"); 2973 pos2 = buildInfo.find('\n', pos1); 2974 if(pos1 != std::string::npos && pos2 != std::string::npos) 2975 { 2976 size_t value_start = buildInfo.rfind(' ', pos2) + 1; 2977 std::string ver( buildInfo.substr(value_start, pos2 - value_start) ); 2978 ::testing::Test::RecordProperty("cv_inner_vcs_version", ver); 2979 if(useStdOut) std::cout << "Inner VCS version: " << ver << std::endl; 2980 } 2981 2982 const char * build_type = 2983 #ifdef _DEBUG 2984 "debug"; 2985 #else 2986 "release"; 2987 #endif 2988 2989 ::testing::Test::RecordProperty("cv_build_type", build_type); 2990 if (useStdOut) std::cout << "Build type: " << build_type << std::endl; 2991 2992 const char* parallel_framework = currentParallelFramework(); 2993 2994 if (parallel_framework) { 2995 ::testing::Test::RecordProperty("cv_parallel_framework", parallel_framework); 2996 if (useStdOut) std::cout << "Parallel framework: " << parallel_framework << std::endl; 2997 } 2998 2999 std::string cpu_features; 3000 3001 #if CV_POPCNT 3002 if (checkHardwareSupport(CV_CPU_POPCNT)) cpu_features += " popcnt"; 3003 #endif 3004 #if CV_MMX 3005 if (checkHardwareSupport(CV_CPU_MMX)) cpu_features += " mmx"; 3006 #endif 3007 #if CV_SSE 3008 if (checkHardwareSupport(CV_CPU_SSE)) cpu_features += " sse"; 3009 #endif 3010 #if CV_SSE2 3011 if (checkHardwareSupport(CV_CPU_SSE2)) cpu_features += " sse2"; 3012 #endif 3013 #if CV_SSE3 3014 if (checkHardwareSupport(CV_CPU_SSE3)) cpu_features += " sse3"; 3015 #endif 3016 #if CV_SSSE3 3017 if (checkHardwareSupport(CV_CPU_SSSE3)) cpu_features += " ssse3"; 3018 #endif 3019 #if CV_SSE4_1 3020 if (checkHardwareSupport(CV_CPU_SSE4_1)) cpu_features += " sse4.1"; 3021 #endif 3022 #if CV_SSE4_2 3023 if (checkHardwareSupport(CV_CPU_SSE4_2)) cpu_features += " sse4.2"; 3024 #endif 3025 #if CV_AVX 3026 if (checkHardwareSupport(CV_CPU_AVX)) cpu_features += " avx"; 3027 #endif 3028 #if CV_AVX2 3029 if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2"; 3030 #endif 3031 #if CV_FMA3 3032 if (checkHardwareSupport(CV_CPU_FMA3)) cpu_features += " fma3"; 3033 #endif 3034 #if CV_AVX_512F 3035 if (checkHardwareSupport(CV_CPU_AVX_512F) cpu_features += " avx-512f"; 3036 #endif 3037 #if CV_AVX_512BW 3038 if (checkHardwareSupport(CV_CPU_AVX_512BW) cpu_features += " avx-512bw"; 3039 #endif 3040 #if CV_AVX_512CD 3041 if (checkHardwareSupport(CV_CPU_AVX_512CD) cpu_features += " avx-512cd"; 3042 #endif 3043 #if CV_AVX_512DQ 3044 if (checkHardwareSupport(CV_CPU_AVX_512DQ) cpu_features += " avx-512dq"; 3045 #endif 3046 #if CV_AVX_512ER 3047 if (checkHardwareSupport(CV_CPU_AVX_512ER) cpu_features += " avx-512er"; 3048 #endif 3049 #if CV_AVX_512IFMA512 3050 if (checkHardwareSupport(CV_CPU_AVX_512IFMA512) cpu_features += " avx-512ifma512"; 3051 #endif 3052 #if CV_AVX_512PF 3053 if (checkHardwareSupport(CV_CPU_AVX_512PF) cpu_features += " avx-512pf"; 3054 #endif 3055 #if CV_AVX_512VBMI 3056 if (checkHardwareSupport(CV_CPU_AVX_512VBMI) cpu_features += " avx-512vbmi"; 3057 #endif 3058 #if CV_AVX_512VL 3059 if (checkHardwareSupport(CV_CPU_AVX_512VL) cpu_features += " avx-512vl"; 3060 #endif 3061 #if CV_NEON 3062 if (checkHardwareSupport(CV_CPU_NEON)) cpu_features += " neon"; 3063 #endif 3064 3065 cpu_features.erase(0, 1); // erase initial space 3066 3067 ::testing::Test::RecordProperty("cv_cpu_features", cpu_features); 3068 if (useStdOut) std::cout << "CPU features: " << cpu_features << std::endl; 3069 3070 #ifdef HAVE_TEGRA_OPTIMIZATION 3071 const char * tegra_optimization = tegra::useTegra() && tegra::isDeviceSupported() ? "enabled" : "disabled"; 3072 ::testing::Test::RecordProperty("cv_tegra_optimization", tegra_optimization); 3073 if (useStdOut) std::cout << "Tegra optimization: " << tegra_optimization << std::endl; 3074 #endif 3075 } 3076 3077 } 3078