/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"

#ifdef HAVE_CUDA

using namespace cvtest;

/////////////////////////////////////////////////////////////////////////////////////////////////
// FAST

namespace
{
    IMPLEMENT_PARAM_CLASS(FAST_Threshold, int)
    IMPLEMENT_PARAM_CLASS(FAST_NonmaxSuppression, bool)
}

PARAM_TEST_CASE(FAST, cv::cuda::DeviceInfo, FAST_Threshold, FAST_NonmaxSuppression)
{
    cv::cuda::DeviceInfo devInfo;
    int threshold;
    bool nonmaxSuppression;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        threshold = GET_PARAM(1);
        nonmaxSuppression = GET_PARAM(2);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(FAST, Accuracy)
{
    cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
    ASSERT_FALSE(image.empty());

    cv::Ptr<cv::cuda::FastFeatureDetector> fast = cv::cuda::FastFeatureDetector::create(threshold, nonmaxSuppression);

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector<cv::KeyPoint> keypoints;
            fast->detect(loadMat(image), keypoints);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        std::vector<cv::KeyPoint> keypoints;
        fast->detect(loadMat(image), keypoints);

        std::vector<cv::KeyPoint> keypoints_gold;
        cv::FAST(image, keypoints_gold, threshold, nonmaxSuppression);

        ASSERT_KEYPOINTS_EQ(keypoints_gold, keypoints);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Features2D, FAST, testing::Combine(
    ALL_DEVICES,
    testing::Values(FAST_Threshold(25), FAST_Threshold(50)),
    testing::Values(FAST_NonmaxSuppression(false), FAST_NonmaxSuppression(true))));

/////////////////////////////////////////////////////////////////////////////////////////////////
// ORB

namespace
{
    IMPLEMENT_PARAM_CLASS(ORB_FeaturesCount, int)
    IMPLEMENT_PARAM_CLASS(ORB_ScaleFactor, float)
    IMPLEMENT_PARAM_CLASS(ORB_LevelsCount, int)
    IMPLEMENT_PARAM_CLASS(ORB_EdgeThreshold, int)
    IMPLEMENT_PARAM_CLASS(ORB_firstLevel, int)
    IMPLEMENT_PARAM_CLASS(ORB_WTA_K, int)
    IMPLEMENT_PARAM_CLASS(ORB_PatchSize, int)
    IMPLEMENT_PARAM_CLASS(ORB_BlurForDescriptor, bool)
}

CV_ENUM(ORB_ScoreType, cv::ORB::HARRIS_SCORE, cv::ORB::FAST_SCORE)

PARAM_TEST_CASE(ORB, cv::cuda::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, ORB_LevelsCount, ORB_EdgeThreshold, ORB_firstLevel, ORB_WTA_K, ORB_ScoreType, ORB_PatchSize, ORB_BlurForDescriptor)
{
    cv::cuda::DeviceInfo devInfo;
    int nFeatures;
    float scaleFactor;
    int nLevels;
    int edgeThreshold;
    int firstLevel;
    int WTA_K;
    int scoreType;
    int patchSize;
    bool blurForDescriptor;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        nFeatures = GET_PARAM(1);
        scaleFactor = GET_PARAM(2);
        nLevels = GET_PARAM(3);
        edgeThreshold = GET_PARAM(4);
        firstLevel = GET_PARAM(5);
        WTA_K = GET_PARAM(6);
        scoreType = GET_PARAM(7);
        patchSize = GET_PARAM(8);
        blurForDescriptor = GET_PARAM(9);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(ORB, Accuracy)
{
    cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
    ASSERT_FALSE(image.empty());

    cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
    mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
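
    // The GPU ORB's create() takes the same parameters as the CPU cv::ORB::create(),
    // plus two CUDA-specific extras: the FAST threshold (kept at its default of 20 here)
    // and blurForDescriptor, which blurs the image before descriptors are computed.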
    cv::Ptr<cv::cuda::ORB> orb =
        cv::cuda::ORB::create(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel,
                              WTA_K, scoreType, patchSize, 20, blurForDescriptor);

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector<cv::KeyPoint> keypoints;
            cv::cuda::GpuMat descriptors;
            orb->detectAndComputeAsync(loadMat(image), loadMat(mask), keypoints, descriptors);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        std::vector<cv::KeyPoint> keypoints;
        cv::cuda::GpuMat descriptors;
        orb->detectAndCompute(loadMat(image), loadMat(mask), keypoints, descriptors);

        cv::Ptr<cv::ORB> orb_gold = cv::ORB::create(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize);

        std::vector<cv::KeyPoint> keypoints_gold;
        cv::Mat descriptors_gold;
        orb_gold->detectAndCompute(image, mask, keypoints_gold, descriptors_gold);

        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<cv::DMatch> matches;
        matcher.match(descriptors_gold, cv::Mat(descriptors), matches);

        int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints, matches);
        double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();

        EXPECT_GT(matchedRatio, 0.35);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Features2D, ORB, testing::Combine(
    ALL_DEVICES,
    testing::Values(ORB_FeaturesCount(1000)),
    testing::Values(ORB_ScaleFactor(1.2f)),
    testing::Values(ORB_LevelsCount(4), ORB_LevelsCount(8)),
    testing::Values(ORB_EdgeThreshold(31)),
    testing::Values(ORB_firstLevel(0)),
    testing::Values(ORB_WTA_K(2), ORB_WTA_K(3), ORB_WTA_K(4)),
    testing::Values(ORB_ScoreType(cv::ORB::HARRIS_SCORE)),
    testing::Values(ORB_PatchSize(31), ORB_PatchSize(29)),
    testing::Values(ORB_BlurForDescriptor(false), ORB_BlurForDescriptor(true))));

/////////////////////////////////////////////////////////////////////////////////////////////////
// BruteForceMatcher

namespace
{
    IMPLEMENT_PARAM_CLASS(DescriptorSize, int)
    IMPLEMENT_PARAM_CLASS(UseMask, bool)
}

PARAM_TEST_CASE(BruteForceMatcher, cv::cuda::DeviceInfo, NormCode, DescriptorSize, UseMask)
{
    cv::cuda::DeviceInfo devInfo;
    int normCode;
    int dim;
    bool useMask;

    int queryDescCount;
    int countFactor;

    cv::Mat query, train;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        normCode = GET_PARAM(1);
        dim = GET_PARAM(2);
        useMask = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());

        queryDescCount = 300; // must be an even number, because some tests split the train data in two
        countFactor = 4; // do not change it

        cv::RNG& rng = cvtest::TS::ptr()->get_rng();

        cv::Mat queryBuf, trainBuf;

        // Generate query descriptors randomly.
        // Descriptor vector elements are integer values.
        queryBuf.create(queryDescCount, dim, CV_32SC1);
        rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
        queryBuf.convertTo(queryBuf, CV_32FC1);

        // Generate train descriptors as follows:
        // copy each query descriptor into the train set countFactor times,
        // and perturb one element of each copy, with perturbations growing
        // in ascending order from copy to copy. The overall bounds of the
        // perturbation are (0.f, 1.f).
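        //
        // For example, with countFactor == 4 the step is 0.25, so the four copies of a
        // query descriptor are perturbed by amounts drawn from [0, 0.25), [0.25, 0.5),
        // [0.5, 0.75) and [0.75, 1.0) respectively. Under both L1 and L2 norms, the k-th
        // nearest neighbour of query i is therefore expected at train row
        // i * countFactor + k; every check below relies on this layout.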
        trainBuf.create(queryDescCount * countFactor, dim, CV_32FC1);
        float step = 1.f / countFactor;
        for (int qIdx = 0; qIdx < queryDescCount; qIdx++)
        {
            cv::Mat queryDescriptor = queryBuf.row(qIdx);
            for (int c = 0; c < countFactor; c++)
            {
                int tIdx = qIdx * countFactor + c;
                cv::Mat trainDescriptor = trainBuf.row(tIdx);
                queryDescriptor.copyTo(trainDescriptor);
                int elem = rng(dim);
                float diff = rng.uniform(step * c, step * (c + 1));
                trainDescriptor.at<float>(0, elem) += diff;
            }
        }

        queryBuf.convertTo(query, CV_32F);
        trainBuf.convertTo(train, CV_32F);
    }
};

CUDA_TEST_P(BruteForceMatcher, Match_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    cv::cuda::GpuMat mask;
    if (useMask)
    {
        mask.create(query.rows, train.rows, CV_8UC1);
        mask.setTo(cv::Scalar::all(1));
    }

    std::vector<cv::DMatch> matches;
    matcher->match(loadMat(query), loadMat(train), matches, mask);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        cv::DMatch match = matches[i];
        if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
            badCount++;
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, Match_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test that case
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    std::vector<cv::DMatch> matches;
    if (useMask)
        matcher->match(cv::cuda::GpuMat(query), matches, masks);
    else
        matcher->match(cv::cuda::GpuMat(query), matches);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
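
    // When the masks are used, the column of each query's closest copy was zeroed out
    // above, so the expected best match shifts to the next-closest copy (trainIdx + 1).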
    int badCount = 0;
    int shift = useMask ? 1 : 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        cv::DMatch match = matches[i];

        if ((int)i < queryDescCount / 2)
        {
            bool validQueryIdx = (match.queryIdx == (int)i);
            bool validTrainIdx = (match.trainIdx == (int)i * countFactor + shift);
            bool validImgIdx = (match.imgIdx == 0);
            if (!validQueryIdx || !validTrainIdx || !validImgIdx)
                badCount++;
        }
        else
        {
            bool validQueryIdx = (match.queryIdx == (int)i);
            bool validTrainIdx = (match.trainIdx == ((int)i - queryDescCount / 2) * countFactor + shift);
            bool validImgIdx = (match.imgIdx == 1);
            if (!validQueryIdx || !validTrainIdx || !validImgIdx)
                badCount++;
        }
    }

    ASSERT_EQ(0, badCount);
}
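
// Because the train copies are perturbed in ascending bands (see SetUp), a knnMatch
// with knn neighbours should return, for query i, exactly the train rows
// i * countFactor + 0 .. i * countFactor + (knn - 1), in that order.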
CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 2;

    cv::cuda::GpuMat mask;
    if (useMask)
    {
        mask.create(query.rows, train.rows, CV_8UC1);
        mask.setTo(cv::Scalar::all(1));
    }

    std::vector< std::vector<cv::DMatch> > matches;
    matcher->knnMatch(loadMat(query), loadMat(train), matches, knn, mask);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
                    localBadCount++;
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 3;

    cv::cuda::GpuMat mask;
    if (useMask)
    {
        mask.create(query.rows, train.rows, CV_8UC1);
        mask.setTo(cv::Scalar::all(1));
    }

    std::vector< std::vector<cv::DMatch> > matches;
    matcher->knnMatch(loadMat(query), loadMat(train), matches, knn, mask);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
                    localBadCount++;
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 2;

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test that case
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    std::vector< std::vector<cv::DMatch> > matches;

    if (useMask)
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn, masks);
    else
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = useMask ? 1 : 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((int)i < queryDescCount / 2)
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0))
                        localBadCount++;
                }
                else
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1))
                        localBadCount++;
                }
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 3;

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test that case
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    std::vector< std::vector<cv::DMatch> > matches;

    if (useMask)
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn, masks);
    else
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = useMask ? 1 : 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((int)i < queryDescCount / 2)
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0))
                        localBadCount++;
                }
                else
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1))
                        localBadCount++;
                }
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}
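
// With radius = 1 / countFactor, only the copy perturbed by less than one step falls
// within the search radius, so each query is expected to produce exactly one match,
// at train row i * countFactor.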
CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const float radius = 1.f / countFactor;

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector< std::vector<cv::DMatch> > matches;
            matcher->radiusMatch(loadMat(query), loadMat(train), matches, radius);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat mask;
        if (useMask)
        {
            mask.create(query.rows, train.rows, CV_8UC1);
            mask.setTo(cv::Scalar::all(1));
        }

        std::vector< std::vector<cv::DMatch> > matches;
        matcher->radiusMatch(loadMat(query), loadMat(train), matches, radius, mask);

        ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

        int badCount = 0;
        for (size_t i = 0; i < matches.size(); i++)
        {
            if ((int)matches[i].size() != 1)
                badCount++;
            else
            {
                cv::DMatch match = matches[i][0];
                if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
                    badCount++;
            }
        }

        ASSERT_EQ(0, badCount);
    }
}

CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int n = 3;
    const float radius = 1.f / countFactor * n;

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test that case
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector< std::vector<cv::DMatch> > matches;
            matcher->radiusMatch(cv::cuda::GpuMat(query), matches, radius, masks);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        std::vector< std::vector<cv::DMatch> > matches;

        if (useMask)
            matcher->radiusMatch(cv::cuda::GpuMat(query), matches, radius, masks);
        else
            matcher->radiusMatch(cv::cuda::GpuMat(query), matches, radius);

        ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
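
        // radius = n / countFactor reaches the n closest copies of each query descriptor.
        // With masks, the closest copy is forbidden, so only n - 1 matches should remain,
        // and the expected train indices shift by one.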
        int badCount = 0;
        int shift = useMask ? 1 : 0;
        int needMatchCount = useMask ? n - 1 : n;
        for (size_t i = 0; i < matches.size(); i++)
        {
            if ((int)matches[i].size() != needMatchCount)
                badCount++;
            else
            {
                int localBadCount = 0;
                for (int k = 0; k < needMatchCount; k++)
                {
                    cv::DMatch match = matches[i][k];
                    if ((int)i < queryDescCount / 2)
                    {
                        if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0))
                            localBadCount++;
                    }
                    else
                    {
                        if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1))
                            localBadCount++;
                    }
                }
                badCount += localBadCount > 0 ? 1 : 0;
            }
        }

        ASSERT_EQ(0, badCount);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Features2D, BruteForceMatcher, testing::Combine(
    ALL_DEVICES,
    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2)),
    testing::Values(DescriptorSize(57), DescriptorSize(64), DescriptorSize(83), DescriptorSize(128), DescriptorSize(179), DescriptorSize(256), DescriptorSize(304)),
    testing::Values(UseMask(false), UseMask(true))));

#endif // HAVE_CUDA