// Home | History | Annotate | Download | only in test  (code-viewer navigation header; kept as a comment so the file stays valid C++)
      1 /*M///////////////////////////////////////////////////////////////////////////////////////
      2 //
      3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
      4 //
      5 //  By downloading, copying, installing or using the software you agree to this license.
      6 //  If you do not agree to this license, do not download, install,
      7 //  copy or use the software.
      8 //
      9 //
     10 //                           License Agreement
     11 //                For Open Source Computer Vision Library
     12 //
     13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
     14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
     15 // Third party copyrights are property of their respective owners.
     16 //
     17 // Redistribution and use in source and binary forms, with or without modification,
     18 // are permitted provided that the following conditions are met:
     19 //
     20 //   * Redistribution's of source code must retain the above copyright notice,
     21 //     this list of conditions and the following disclaimer.
     22 //
     23 //   * Redistribution's in binary form must reproduce the above copyright notice,
     24 //     this list of conditions and the following disclaimer in the documentation
     25 //     and/or other materials provided with the distribution.
     26 //
     27 //   * The name of the copyright holders may not be used to endorse or promote products
     28 //     derived from this software without specific prior written permission.
     29 //
     30 // This software is provided by the copyright holders and contributors "as is" and
     31 // any express or implied warranties, including, but not limited to, the implied
     32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
     33 // In no event shall the Intel Corporation or contributors be liable for any direct,
     34 // indirect, incidental, special, exemplary, or consequential damages
     35 // (including, but not limited to, procurement of substitute goods or services;
     36 // loss of use, data, or profits; or business interruption) however caused
     37 // and on any theory of liability, whether in contract, strict liability,
     38 // or tort (including negligence or otherwise) arising in any way out of
     39 // the use of this software, even if advised of the possibility of such damage.
     40 //
     41 //M*/
     42 
     43 #include "test_precomp.hpp"
     44 
     45 #ifdef HAVE_CUDA
     46 
     47 using namespace cvtest;
     48 
     49 ////////////////////////////////////////////////////////////////////////////////
     50 // Add_Array
     51 
// Fixture for cv::cuda::add with two array operands.
// Parameters: CUDA device, matrix size, (source depth, destination depth)
// pair, channel count, and whether to exercise submatrix (ROI) code paths.
PARAM_TEST_CASE(Add_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    int channels;                          // channel count
    bool useRoi;                           // true -> operate on a submatrix ROI

    int stype;  // full source type: depth.first + channels
    int dtype;  // full destination type: depth.second + channels

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());

        stype = CV_MAKE_TYPE(depth.first, channels);
        dtype = CV_MAKE_TYPE(depth.second, channels);
    }
};
     77 
     78 CUDA_TEST_P(Add_Array, Accuracy)
     79 {
     80     cv::Mat mat1 = randomMat(size, stype);
     81     cv::Mat mat2 = randomMat(size, stype);
     82 
     83     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
     84     {
     85         try
     86         {
     87             cv::cuda::GpuMat dst;
     88             cv::cuda::add(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
     89         }
     90         catch (const cv::Exception& e)
     91         {
     92             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
     93         }
     94     }
     95     else
     96     {
     97         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
     98         dst.setTo(cv::Scalar::all(0));
     99         cv::cuda::add(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, cv::cuda::GpuMat(), depth.second);
    100 
    101         cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
    102         cv::add(mat1, mat2, dst_gold, cv::noArray(), depth.second);
    103 
    104         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    105     }
    106 }
    107 
// Instantiate Add_Array over all devices, sizes, depth pairs, channel counts and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Array, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    ALL_CHANNELS,
    WHOLE_SUBMAT));
    114 
// Fixture for cv::cuda::add with two array operands and a mask.
// Single-channel only; parameters: device, size, (src, dst) depth pair, ROI flag.
PARAM_TEST_CASE(Add_Array_Mask, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    bool useRoi;                           // true -> operate on a submatrix ROI

    int stype;  // single-channel source type
    int dtype;  // single-channel destination type

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());

        stype = CV_MAKE_TYPE(depth.first, 1);
        dtype = CV_MAKE_TYPE(depth.second, 1);
    }
};
    138 
    139 CUDA_TEST_P(Add_Array_Mask, Accuracy)
    140 {
    141     cv::Mat mat1 = randomMat(size, stype);
    142     cv::Mat mat2 = randomMat(size, stype);
    143     cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);
    144 
    145     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    146     {
    147         try
    148         {
    149             cv::cuda::GpuMat dst;
    150             cv::cuda::add(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
    151         }
    152         catch (const cv::Exception& e)
    153         {
    154             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    155         }
    156     }
    157     else
    158     {
    159         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
    160         dst.setTo(cv::Scalar::all(0));
    161         cv::cuda::add(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, loadMat(mask, useRoi), depth.second);
    162 
    163         cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
    164         cv::add(mat1, mat2, dst_gold, mask, depth.second);
    165 
    166         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    167     }
    168 }
    169 
// Instantiate Add_Array_Mask over all devices, sizes, depth pairs and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Array_Mask, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    175 
    176 ////////////////////////////////////////////////////////////////////////////////
    177 // Add_Scalar
    178 
// Fixture for cv::cuda::add with an array and a scalar operand.
// Parameters: device, size, (src, dst) depth pair, ROI flag.
PARAM_TEST_CASE(Add_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    bool useRoi;                           // true -> operate on a submatrix ROI

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    196 
    197 CUDA_TEST_P(Add_Scalar, WithOutMask)
    198 {
    199     cv::Mat mat = randomMat(size, depth.first);
    200     cv::Scalar val = randomScalar(0, 255);
    201 
    202     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    203     {
    204         try
    205         {
    206             cv::cuda::GpuMat dst;
    207             cv::cuda::add(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
    208         }
    209         catch (const cv::Exception& e)
    210         {
    211             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    212         }
    213     }
    214     else
    215     {
    216         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    217         dst.setTo(cv::Scalar::all(0));
    218         cv::cuda::add(loadMat(mat, useRoi), val, dst, cv::cuda::GpuMat(), depth.second);
    219 
    220         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    221         cv::add(mat, val, dst_gold, cv::noArray(), depth.second);
    222 
    223         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
    224     }
    225 }
    226 
    227 CUDA_TEST_P(Add_Scalar, WithMask)
    228 {
    229     cv::Mat mat = randomMat(size, depth.first);
    230     cv::Scalar val = randomScalar(0, 255);
    231     cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
    232 
    233     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    234     {
    235         try
    236         {
    237             cv::cuda::GpuMat dst;
    238             cv::cuda::add(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
    239         }
    240         catch (const cv::Exception& e)
    241         {
    242             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    243         }
    244     }
    245     else
    246     {
    247         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    248         dst.setTo(cv::Scalar::all(0));
    249         cv::cuda::add(loadMat(mat, useRoi), val, dst, loadMat(mask, useRoi), depth.second);
    250 
    251         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    252         cv::add(mat, val, dst_gold, mask, depth.second);
    253 
    254         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
    255     }
    256 }
    257 
// Instantiate Add_Scalar over all devices, sizes, depth pairs and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Scalar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    263 
    264 ////////////////////////////////////////////////////////////////////////////////
    265 // Add_Scalar_First
    266 
// Fixture for cv::cuda::add with the scalar as the FIRST operand.
// Parameters: device, size, (src, dst) depth pair, ROI flag.
PARAM_TEST_CASE(Add_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    bool useRoi;                           // true -> operate on a submatrix ROI

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    284 
    285 CUDA_TEST_P(Add_Scalar_First, WithOutMask)
    286 {
    287     cv::Mat mat = randomMat(size, depth.first);
    288     cv::Scalar val = randomScalar(0, 255);
    289 
    290     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    291     {
    292         try
    293         {
    294             cv::cuda::GpuMat dst;
    295             cv::cuda::add(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
    296         }
    297         catch (const cv::Exception& e)
    298         {
    299             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    300         }
    301     }
    302     else
    303     {
    304         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    305         dst.setTo(cv::Scalar::all(0));
    306         cv::cuda::add(val, loadMat(mat, useRoi), dst, cv::cuda::GpuMat(), depth.second);
    307 
    308         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    309         cv::add(val, mat, dst_gold, cv::noArray(), depth.second);
    310 
    311         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    312     }
    313 }
    314 
    315 CUDA_TEST_P(Add_Scalar_First, WithMask)
    316 {
    317     cv::Mat mat = randomMat(size, depth.first);
    318     cv::Scalar val = randomScalar(0, 255);
    319     cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
    320 
    321     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    322     {
    323         try
    324         {
    325             cv::cuda::GpuMat dst;
    326             cv::cuda::add(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
    327         }
    328         catch (const cv::Exception& e)
    329         {
    330             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    331         }
    332     }
    333     else
    334     {
    335         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    336         dst.setTo(cv::Scalar::all(0));
    337         cv::cuda::add(val, loadMat(mat, useRoi), dst, loadMat(mask, useRoi), depth.second);
    338 
    339         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    340         cv::add(val, mat, dst_gold, mask, depth.second);
    341 
    342         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    343     }
    344 }
    345 
// Instantiate Add_Scalar_First over all devices, sizes, depth pairs and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Add_Scalar_First, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    351 
    352 ////////////////////////////////////////////////////////////////////////////////
    353 // Subtract_Array
    354 
// Fixture for cv::cuda::subtract with two array operands.
// Parameters: device, size, (src, dst) depth pair, channel count, ROI flag.
PARAM_TEST_CASE(Subtract_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    int channels;                          // channel count
    bool useRoi;                           // true -> operate on a submatrix ROI

    int stype;  // full source type: depth.first + channels
    int dtype;  // full destination type: depth.second + channels

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());

        stype = CV_MAKE_TYPE(depth.first, channels);
        dtype = CV_MAKE_TYPE(depth.second, channels);
    }
};
    380 
    381 CUDA_TEST_P(Subtract_Array, Accuracy)
    382 {
    383     cv::Mat mat1 = randomMat(size, stype);
    384     cv::Mat mat2 = randomMat(size, stype);
    385 
    386     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    387     {
    388         try
    389         {
    390             cv::cuda::GpuMat dst;
    391             cv::cuda::subtract(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
    392         }
    393         catch (const cv::Exception& e)
    394         {
    395             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    396         }
    397     }
    398     else
    399     {
    400         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
    401         dst.setTo(cv::Scalar::all(0));
    402         cv::cuda::subtract(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, cv::cuda::GpuMat(), depth.second);
    403 
    404         cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
    405         cv::subtract(mat1, mat2, dst_gold, cv::noArray(), depth.second);
    406 
    407         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    408     }
    409 }
    410 
// Instantiate Subtract_Array over all devices, sizes, depth pairs, channel counts and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Array, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    ALL_CHANNELS,
    WHOLE_SUBMAT));
    417 
// Fixture for cv::cuda::subtract with two array operands and a mask.
// Single-channel only; parameters: device, size, (src, dst) depth pair, ROI flag.
PARAM_TEST_CASE(Subtract_Array_Mask, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    bool useRoi;                           // true -> operate on a submatrix ROI

    int stype;  // single-channel source type
    int dtype;  // single-channel destination type

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());

        stype = CV_MAKE_TYPE(depth.first, 1);
        dtype = CV_MAKE_TYPE(depth.second, 1);
    }
};
    441 
    442 CUDA_TEST_P(Subtract_Array_Mask, Accuracy)
    443 {
    444     cv::Mat mat1 = randomMat(size, stype);
    445     cv::Mat mat2 = randomMat(size, stype);
    446     cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
    447 
    448     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    449     {
    450         try
    451         {
    452             cv::cuda::GpuMat dst;
    453             cv::cuda::subtract(loadMat(mat1), loadMat(mat2), dst, cv::cuda::GpuMat(), depth.second);
    454         }
    455         catch (const cv::Exception& e)
    456         {
    457             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    458         }
    459     }
    460     else
    461     {
    462         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
    463         dst.setTo(cv::Scalar::all(0));
    464         cv::cuda::subtract(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, loadMat(mask, useRoi), depth.second);
    465 
    466         cv::Mat dst_gold(size, dtype, cv::Scalar::all(0));
    467         cv::subtract(mat1, mat2, dst_gold, mask, depth.second);
    468 
    469         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    470     }
    471 }
    472 
// Instantiate Subtract_Array_Mask over all devices, sizes, depth pairs and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Array_Mask, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    478 
    479 ////////////////////////////////////////////////////////////////////////////////
    480 // Subtract_Scalar
    481 
// Fixture for cv::cuda::subtract with an array and a scalar operand.
// Parameters: device, size, (src, dst) depth pair, ROI flag.
PARAM_TEST_CASE(Subtract_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    bool useRoi;                           // true -> operate on a submatrix ROI

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    499 
    500 CUDA_TEST_P(Subtract_Scalar, WithOutMask)
    501 {
    502     cv::Mat mat = randomMat(size, depth.first);
    503     cv::Scalar val = randomScalar(0, 255);
    504 
    505     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    506     {
    507         try
    508         {
    509             cv::cuda::GpuMat dst;
    510             cv::cuda::subtract(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
    511         }
    512         catch (const cv::Exception& e)
    513         {
    514             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    515         }
    516     }
    517     else
    518     {
    519         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    520         dst.setTo(cv::Scalar::all(0));
    521         cv::cuda::subtract(loadMat(mat, useRoi), val, dst, cv::cuda::GpuMat(), depth.second);
    522 
    523         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    524         cv::subtract(mat, val, dst_gold, cv::noArray(), depth.second);
    525 
    526         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
    527     }
    528 }
    529 
    530 CUDA_TEST_P(Subtract_Scalar, WithMask)
    531 {
    532     cv::Mat mat = randomMat(size, depth.first);
    533     cv::Scalar val = randomScalar(0, 255);
    534     cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
    535 
    536     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    537     {
    538         try
    539         {
    540             cv::cuda::GpuMat dst;
    541             cv::cuda::subtract(loadMat(mat), val, dst, cv::cuda::GpuMat(), depth.second);
    542         }
    543         catch (const cv::Exception& e)
    544         {
    545             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    546         }
    547     }
    548     else
    549     {
    550         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    551         dst.setTo(cv::Scalar::all(0));
    552         cv::cuda::subtract(loadMat(mat, useRoi), val, dst, loadMat(mask, useRoi), depth.second);
    553 
    554         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    555         cv::subtract(mat, val, dst_gold, mask, depth.second);
    556 
    557         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
    558     }
    559 }
    560 
// Instantiate Subtract_Scalar over all devices, sizes, depth pairs and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Scalar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    566 
    567 ////////////////////////////////////////////////////////////////////////////////
    568 // Subtract_Scalar_First
    569 
// Fixture for cv::cuda::subtract with the scalar as the FIRST operand.
// Parameters: device, size, (src, dst) depth pair, ROI flag.
PARAM_TEST_CASE(Subtract_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    bool useRoi;                           // true -> operate on a submatrix ROI

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    587 
    588 CUDA_TEST_P(Subtract_Scalar_First, WithOutMask)
    589 {
    590     cv::Mat mat = randomMat(size, depth.first);
    591     cv::Scalar val = randomScalar(0, 255);
    592 
    593     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    594     {
    595         try
    596         {
    597             cv::cuda::GpuMat dst;
    598             cv::cuda::subtract(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
    599         }
    600         catch (const cv::Exception& e)
    601         {
    602             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    603         }
    604     }
    605     else
    606     {
    607         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    608         dst.setTo(cv::Scalar::all(0));
    609         cv::cuda::subtract(val, loadMat(mat, useRoi), dst, cv::cuda::GpuMat(), depth.second);
    610 
    611         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    612         cv::subtract(val, mat, dst_gold, cv::noArray(), depth.second);
    613 
    614         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    615     }
    616 }
    617 
    618 CUDA_TEST_P(Subtract_Scalar_First, WithMask)
    619 {
    620     cv::Mat mat = randomMat(size, depth.first);
    621     cv::Scalar val = randomScalar(0, 255);
    622     cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
    623 
    624     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    625     {
    626         try
    627         {
    628             cv::cuda::GpuMat dst;
    629             cv::cuda::subtract(val, loadMat(mat), dst, cv::cuda::GpuMat(), depth.second);
    630         }
    631         catch (const cv::Exception& e)
    632         {
    633             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    634         }
    635     }
    636     else
    637     {
    638         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    639         dst.setTo(cv::Scalar::all(0));
    640         cv::cuda::subtract(val, loadMat(mat, useRoi), dst, loadMat(mask, useRoi), depth.second);
    641 
    642         cv::Mat dst_gold(size, depth.second, cv::Scalar::all(0));
    643         cv::subtract(val, mat, dst_gold, mask, depth.second);
    644 
    645         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 0.0);
    646     }
    647 }
    648 
// Instantiate Subtract_Scalar_First over all devices, sizes, depth pairs and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Subtract_Scalar_First, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    654 
    655 ////////////////////////////////////////////////////////////////////////////////
    656 // Multiply_Array
    657 
// Fixture for cv::cuda::multiply with two array operands.
// Parameters: device, size, (src, dst) depth pair, channel count, ROI flag.
PARAM_TEST_CASE(Multiply_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;          // device under test
    cv::Size size;                         // matrix size
    std::pair<MatDepth, MatDepth> depth;   // (source depth, destination depth)
    int channels;                          // channel count
    bool useRoi;                           // true -> operate on a submatrix ROI

    int stype;  // full source type: depth.first + channels
    int dtype;  // full destination type: depth.second + channels

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());

        stype = CV_MAKE_TYPE(depth.first, channels);
        dtype = CV_MAKE_TYPE(depth.second, channels);
    }
};
    683 
    684 CUDA_TEST_P(Multiply_Array, WithOutScale)
    685 {
    686     cv::Mat mat1 = randomMat(size, stype);
    687     cv::Mat mat2 = randomMat(size, stype);
    688 
    689     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    690     {
    691         try
    692         {
    693             cv::cuda::GpuMat dst;
    694             cv::cuda::multiply(loadMat(mat1), loadMat(mat2), dst, 1, depth.second);
    695         }
    696         catch (const cv::Exception& e)
    697         {
    698             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    699         }
    700     }
    701     else
    702     {
    703         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
    704         cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, 1, depth.second);
    705 
    706         cv::Mat dst_gold;
    707         cv::multiply(mat1, mat2, dst_gold, 1, depth.second);
    708 
    709         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 0.0);
    710     }
    711 }
    712 
    713 CUDA_TEST_P(Multiply_Array, WithScale)
    714 {
    715     cv::Mat mat1 = randomMat(size, stype);
    716     cv::Mat mat2 = randomMat(size, stype);
    717     double scale = randomDouble(0.0, 255.0);
    718 
    719     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    720     {
    721         try
    722         {
    723             cv::cuda::GpuMat dst;
    724             cv::cuda::multiply(loadMat(mat1), loadMat(mat2), dst, scale, depth.second);
    725         }
    726         catch (const cv::Exception& e)
    727         {
    728             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    729         }
    730     }
    731     else
    732     {
    733         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
    734         cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, scale, depth.second);
    735 
    736         cv::Mat dst_gold;
    737         cv::multiply(mat1, mat2, dst_gold, scale, depth.second);
    738 
    739         EXPECT_MAT_NEAR(dst_gold, dst, 2.0);
    740     }
    741 }
    742 
// Instantiate Multiply_Array over all devices, sizes, depth pairs, channel counts and ROI modes.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Array, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    ALL_CHANNELS,
    WHOLE_SUBMAT));
    749 
    750 ////////////////////////////////////////////////////////////////////////////////
    751 // Multiply_Array_Special
    752 
// Fixture for the mixed-type multiply special cases (4-channel matrix x
// single-channel float matrix). Parameters: device, size, ROI flag.
PARAM_TEST_CASE(Multiply_Array_Special, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // device under test
    cv::Size size;                 // matrix size
    bool useRoi;                   // true -> operate on a submatrix ROI

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        // Direct all subsequent CUDA calls to the parameterized device.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    768 
    769 CUDA_TEST_P(Multiply_Array_Special, Case_8UC4x_32FC1)
    770 {
    771     cv::Mat mat1 = randomMat(size, CV_8UC4);
    772     cv::Mat mat2 = randomMat(size, CV_32FC1);
    773 
    774     cv::cuda::GpuMat dst = createMat(size, CV_8UC4, useRoi);
    775     cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
    776 
    777     cv::Mat h_dst(dst);
    778 
    779     for (int y = 0; y < h_dst.rows; ++y)
    780     {
    781         const cv::Vec4b* mat1_row = mat1.ptr<cv::Vec4b>(y);
    782         const float* mat2_row = mat2.ptr<float>(y);
    783         const cv::Vec4b* dst_row = h_dst.ptr<cv::Vec4b>(y);
    784 
    785         for (int x = 0; x < h_dst.cols; ++x)
    786         {
    787             cv::Vec4b val1 = mat1_row[x];
    788             float val2 = mat2_row[x];
    789             cv::Vec4b actual = dst_row[x];
    790 
    791             cv::Vec4b gold;
    792 
    793             gold[0] = cv::saturate_cast<uchar>(val1[0] * val2);
    794             gold[1] = cv::saturate_cast<uchar>(val1[1] * val2);
    795             gold[2] = cv::saturate_cast<uchar>(val1[2] * val2);
    796             gold[3] = cv::saturate_cast<uchar>(val1[3] * val2);
    797 
    798             ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
    799             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
    800             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
    801             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
    802         }
    803     }
    804 }
    805 
    806 CUDA_TEST_P(Multiply_Array_Special, Case_16SC4x_32FC1)
    807 {
    808     cv::Mat mat1 = randomMat(size, CV_16SC4);
    809     cv::Mat mat2 = randomMat(size, CV_32FC1);
    810 
    811     cv::cuda::GpuMat dst = createMat(size, CV_16SC4, useRoi);
    812     cv::cuda::multiply(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
    813 
    814     cv::Mat h_dst(dst);
    815 
    816     for (int y = 0; y < h_dst.rows; ++y)
    817     {
    818         const cv::Vec4s* mat1_row = mat1.ptr<cv::Vec4s>(y);
    819         const float* mat2_row = mat2.ptr<float>(y);
    820         const cv::Vec4s* dst_row = h_dst.ptr<cv::Vec4s>(y);
    821 
    822         for (int x = 0; x < h_dst.cols; ++x)
    823         {
    824             cv::Vec4s val1 = mat1_row[x];
    825             float val2 = mat2_row[x];
    826             cv::Vec4s actual = dst_row[x];
    827 
    828             cv::Vec4s gold;
    829 
    830             gold[0] = cv::saturate_cast<short>(val1[0] * val2);
    831             gold[1] = cv::saturate_cast<short>(val1[1] * val2);
    832             gold[2] = cv::saturate_cast<short>(val1[2] * val2);
    833             gold[3] = cv::saturate_cast<short>(val1[3] * val2);
    834 
    835             ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
    836             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
    837             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
    838             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
    839         }
    840     }
    841 }
    842 
// Run the special-case multiply tests on every device, size and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Array_Special, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));
    847 
    848 ////////////////////////////////////////////////////////////////////////////////
    849 // Multiply_Scalar
    850 
// Fixture for matrix-by-scalar multiply tests: parameterized over device,
// matrix size, (source depth, destination depth) pair and ROI usage.
PARAM_TEST_CASE(Multiply_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    std::pair<MatDepth, MatDepth> depth;  // (source depth, destination depth)
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    868 
    869 CUDA_TEST_P(Multiply_Scalar, WithOutScale)
    870 {
    871     cv::Mat mat = randomMat(size, depth.first);
    872     cv::Scalar val = randomScalar(0, 255);
    873 
    874     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    875     {
    876         try
    877         {
    878             cv::cuda::GpuMat dst;
    879             cv::cuda::multiply(loadMat(mat), val, dst, 1, depth.second);
    880         }
    881         catch (const cv::Exception& e)
    882         {
    883             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    884         }
    885     }
    886     else
    887     {
    888         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    889         cv::cuda::multiply(loadMat(mat, useRoi), val, dst, 1, depth.second);
    890 
    891         cv::Mat dst_gold;
    892         cv::multiply(mat, val, dst_gold, 1, depth.second);
    893 
    894         EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
    895     }
    896 }
    897 
    898 
    899 CUDA_TEST_P(Multiply_Scalar, WithScale)
    900 {
    901     cv::Mat mat = randomMat(size, depth.first);
    902     cv::Scalar val = randomScalar(0, 255);
    903     double scale = randomDouble(0.0, 255.0);
    904 
    905     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    906     {
    907         try
    908         {
    909             cv::cuda::GpuMat dst;
    910             cv::cuda::multiply(loadMat(mat), val, dst, scale, depth.second);
    911         }
    912         catch (const cv::Exception& e)
    913         {
    914             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    915         }
    916     }
    917     else
    918     {
    919         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    920         cv::cuda::multiply(loadMat(mat, useRoi), val, dst, scale, depth.second);
    921 
    922         cv::Mat dst_gold;
    923         cv::multiply(mat, val, dst_gold, scale, depth.second);
    924 
    925         EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
    926     }
    927 }
    928 
// Run mat*scalar tests on every device, size, depth pair and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Scalar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
    934 
    935 ////////////////////////////////////////////////////////////////////////////////
    936 // Multiply_Scalar_First
    937 
// Fixture for scalar-by-matrix multiply tests (scalar is the first operand):
// parameterized over device, size, depth pair and ROI usage.
PARAM_TEST_CASE(Multiply_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    std::pair<MatDepth, MatDepth> depth;  // (source depth, destination depth)
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
    955 
    956 CUDA_TEST_P(Multiply_Scalar_First, WithOutScale)
    957 {
    958     cv::Mat mat = randomMat(size, depth.first);
    959     cv::Scalar val = randomScalar(0, 255);
    960 
    961     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    962     {
    963         try
    964         {
    965             cv::cuda::GpuMat dst;
    966             cv::cuda::multiply(val, loadMat(mat), dst, 1, depth.second);
    967         }
    968         catch (const cv::Exception& e)
    969         {
    970             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
    971         }
    972     }
    973     else
    974     {
    975         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
    976         cv::cuda::multiply(val, loadMat(mat, useRoi), dst, 1, depth.second);
    977 
    978         cv::Mat dst_gold;
    979         cv::multiply(val, mat, dst_gold, 1, depth.second);
    980 
    981         EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
    982     }
    983 }
    984 
    985 
    986 CUDA_TEST_P(Multiply_Scalar_First, WithScale)
    987 {
    988     cv::Mat mat = randomMat(size, depth.first);
    989     cv::Scalar val = randomScalar(0, 255);
    990     double scale = randomDouble(0.0, 255.0);
    991 
    992     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    993     {
    994         try
    995         {
    996             cv::cuda::GpuMat dst;
    997             cv::cuda::multiply(val, loadMat(mat), dst, scale, depth.second);
    998         }
    999         catch (const cv::Exception& e)
   1000         {
   1001             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1002         }
   1003     }
   1004     else
   1005     {
   1006         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
   1007         cv::cuda::multiply(val, loadMat(mat, useRoi), dst, scale, depth.second);
   1008 
   1009         cv::Mat dst_gold;
   1010         cv::multiply(val, mat, dst_gold, scale, depth.second);
   1011 
   1012         EXPECT_MAT_NEAR(dst_gold, dst, 1.0);
   1013     }
   1014 }
   1015 
// Run scalar*mat tests on every device, size, depth pair and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Multiply_Scalar_First, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
   1021 
   1022 ////////////////////////////////////////////////////////////////////////////////
   1023 // Divide_Array
   1024 
// Fixture for element-wise divide tests: parameterized over device, size,
// (source depth, destination depth) pair, channel count and ROI usage.
PARAM_TEST_CASE(Divide_Array, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, Channels, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    std::pair<MatDepth, MatDepth> depth;  // (source depth, destination depth)
    int channels;
    bool useRoi;

    // Full source/destination types combining depth and channel count.
    int stype;
    int dtype;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());

        stype = CV_MAKE_TYPE(depth.first, channels);
        dtype = CV_MAKE_TYPE(depth.second, channels);
    }
};
   1050 
   1051 CUDA_TEST_P(Divide_Array, WithOutScale)
   1052 {
   1053     cv::Mat mat1 = randomMat(size, stype);
   1054     cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0);
   1055 
   1056     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1057     {
   1058         try
   1059         {
   1060             cv::cuda::GpuMat dst;
   1061             cv::cuda::divide(loadMat(mat1), loadMat(mat2), dst, 1, depth.second);
   1062         }
   1063         catch (const cv::Exception& e)
   1064         {
   1065             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1066         }
   1067     }
   1068     else
   1069     {
   1070         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
   1071         cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, 1, depth.second);
   1072 
   1073         cv::Mat dst_gold;
   1074         cv::divide(mat1, mat2, dst_gold, 1, depth.second);
   1075 
   1076         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
   1077     }
   1078 }
   1079 
   1080 CUDA_TEST_P(Divide_Array, WithScale)
   1081 {
   1082     cv::Mat mat1 = randomMat(size, stype);
   1083     cv::Mat mat2 = randomMat(size, stype, 1.0, 255.0);
   1084     double scale = randomDouble(0.0, 255.0);
   1085 
   1086     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1087     {
   1088         try
   1089         {
   1090             cv::cuda::GpuMat dst;
   1091             cv::cuda::divide(loadMat(mat1), loadMat(mat2), dst, scale, depth.second);
   1092         }
   1093         catch (const cv::Exception& e)
   1094         {
   1095             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1096         }
   1097     }
   1098     else
   1099     {
   1100         cv::cuda::GpuMat dst = createMat(size, dtype, useRoi);
   1101         cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst, scale, depth.second);
   1102 
   1103         cv::Mat dst_gold;
   1104         cv::divide(mat1, mat2, dst_gold, scale, depth.second);
   1105 
   1106         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 1.0);
   1107     }
   1108 }
   1109 
// Run element-wise divide tests on every device, size, depth pair,
// channel count and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Array, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    ALL_CHANNELS,
    WHOLE_SUBMAT));
   1116 
   1117 ////////////////////////////////////////////////////////////////////////////////
   1118 // Divide_Array_Special
   1119 
// Fixture for the special-case divide tests (multi-channel mat divided by a
// single-channel float mat): parameterized over device, size and ROI usage.
PARAM_TEST_CASE(Divide_Array_Special, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1135 
   1136 CUDA_TEST_P(Divide_Array_Special, Case_8UC4x_32FC1)
   1137 {
   1138     cv::Mat mat1 = randomMat(size, CV_8UC4);
   1139     cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
   1140 
   1141     cv::cuda::GpuMat dst = createMat(size, CV_8UC4, useRoi);
   1142     cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
   1143 
   1144     cv::Mat h_dst(dst);
   1145 
   1146     for (int y = 0; y < h_dst.rows; ++y)
   1147     {
   1148         const cv::Vec4b* mat1_row = mat1.ptr<cv::Vec4b>(y);
   1149         const float* mat2_row = mat2.ptr<float>(y);
   1150         const cv::Vec4b* dst_row = h_dst.ptr<cv::Vec4b>(y);
   1151 
   1152         for (int x = 0; x < h_dst.cols; ++x)
   1153         {
   1154             cv::Vec4b val1 = mat1_row[x];
   1155             float val2 = mat2_row[x];
   1156             cv::Vec4b actual = dst_row[x];
   1157 
   1158             cv::Vec4b gold;
   1159 
   1160             gold[0] = cv::saturate_cast<uchar>(val1[0] / val2);
   1161             gold[1] = cv::saturate_cast<uchar>(val1[1] / val2);
   1162             gold[2] = cv::saturate_cast<uchar>(val1[2] / val2);
   1163             gold[3] = cv::saturate_cast<uchar>(val1[3] / val2);
   1164 
   1165             ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
   1166             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
   1167             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
   1168             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
   1169         }
   1170     }
   1171 }
   1172 
   1173 CUDA_TEST_P(Divide_Array_Special, Case_16SC4x_32FC1)
   1174 {
   1175     cv::Mat mat1 = randomMat(size, CV_16SC4);
   1176     cv::Mat mat2 = randomMat(size, CV_32FC1, 1.0, 255.0);
   1177 
   1178     cv::cuda::GpuMat dst = createMat(size, CV_16SC4, useRoi);
   1179     cv::cuda::divide(loadMat(mat1, useRoi), loadMat(mat2, useRoi), dst);
   1180 
   1181     cv::Mat h_dst(dst);
   1182 
   1183     for (int y = 0; y < h_dst.rows; ++y)
   1184     {
   1185         const cv::Vec4s* mat1_row = mat1.ptr<cv::Vec4s>(y);
   1186         const float* mat2_row = mat2.ptr<float>(y);
   1187         const cv::Vec4s* dst_row = h_dst.ptr<cv::Vec4s>(y);
   1188 
   1189         for (int x = 0; x < h_dst.cols; ++x)
   1190         {
   1191             cv::Vec4s val1 = mat1_row[x];
   1192             float val2 = mat2_row[x];
   1193             cv::Vec4s actual = dst_row[x];
   1194 
   1195             cv::Vec4s gold;
   1196 
   1197             gold[0] = cv::saturate_cast<short>(val1[0] / val2);
   1198             gold[1] = cv::saturate_cast<short>(val1[1] / val2);
   1199             gold[2] = cv::saturate_cast<short>(val1[2] / val2);
   1200             gold[3] = cv::saturate_cast<short>(val1[3] / val2);
   1201 
   1202             ASSERT_LE(std::abs(gold[0] - actual[0]), 1.0);
   1203             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
   1204             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
   1205             ASSERT_LE(std::abs(gold[1] - actual[1]), 1.0);
   1206         }
   1207     }
   1208 }
   1209 
// Run the special-case divide tests on every device, size and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Array_Special, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));
   1214 
   1215 ////////////////////////////////////////////////////////////////////////////////
   1216 // Divide_Scalar
   1217 
// Fixture for matrix-by-scalar divide tests: parameterized over device,
// size, (source depth, destination depth) pair and ROI usage.
PARAM_TEST_CASE(Divide_Scalar, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    std::pair<MatDepth, MatDepth> depth;  // (source depth, destination depth)
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1235 
   1236 CUDA_TEST_P(Divide_Scalar, WithOutScale)
   1237 {
   1238     cv::Mat mat = randomMat(size, depth.first);
   1239     cv::Scalar val = randomScalar(1.0, 255.0);
   1240 
   1241     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1242     {
   1243         try
   1244         {
   1245             cv::cuda::GpuMat dst;
   1246             cv::cuda::divide(loadMat(mat), val, dst, 1, depth.second);
   1247         }
   1248         catch (const cv::Exception& e)
   1249         {
   1250             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1251         }
   1252     }
   1253     else
   1254     {
   1255         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
   1256         cv::cuda::divide(loadMat(mat, useRoi), val, dst, 1, depth.second);
   1257 
   1258         cv::Mat dst_gold;
   1259         cv::divide(mat, val, dst_gold, 1, depth.second);
   1260 
   1261         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
   1262     }
   1263 }
   1264 
   1265 CUDA_TEST_P(Divide_Scalar, WithScale)
   1266 {
   1267     cv::Mat mat = randomMat(size, depth.first);
   1268     cv::Scalar val = randomScalar(1.0, 255.0);
   1269     double scale = randomDouble(0.0, 255.0);
   1270 
   1271     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1272     {
   1273         try
   1274         {
   1275             cv::cuda::GpuMat dst;
   1276             cv::cuda::divide(loadMat(mat), val, dst, scale, depth.second);
   1277         }
   1278         catch (const cv::Exception& e)
   1279         {
   1280             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1281         }
   1282     }
   1283     else
   1284     {
   1285         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
   1286         cv::cuda::divide(loadMat(mat, useRoi), val, dst, scale, depth.second);
   1287 
   1288         cv::Mat dst_gold;
   1289         cv::divide(mat, val, dst_gold, scale, depth.second);
   1290 
   1291         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-2 : 1.0);
   1292     }
   1293 }
   1294 
// Run mat/scalar tests on every device, size, depth pair and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Scalar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
   1300 
   1301 ////////////////////////////////////////////////////////////////////////////////
   1302 // Divide_Scalar_First
   1303 
// Fixture for scalar-by-matrix divide tests (scalar is the first operand):
// parameterized over device, size, depth pair and ROI usage.
PARAM_TEST_CASE(Divide_Scalar_First, cv::cuda::DeviceInfo, cv::Size, std::pair<MatDepth, MatDepth>, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    std::pair<MatDepth, MatDepth> depth;  // (source depth, destination depth)
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1321 
   1322 CUDA_TEST_P(Divide_Scalar_First, Accuracy)
   1323 {
   1324     double scale = randomDouble(0.0, 255.0);
   1325     cv::Mat mat = randomMat(size, depth.first, 1.0, 255.0);
   1326 
   1327     if ((depth.first == CV_64F || depth.second == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1328     {
   1329         try
   1330         {
   1331             cv::cuda::GpuMat dst;
   1332             cv::cuda::divide(scale, loadMat(mat), dst, 1.0, depth.second);
   1333         }
   1334         catch (const cv::Exception& e)
   1335         {
   1336             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1337         }
   1338     }
   1339     else
   1340     {
   1341         cv::cuda::GpuMat dst = createMat(size, depth.second, useRoi);
   1342         cv::cuda::divide(scale, loadMat(mat, useRoi), dst, 1.0, depth.second);
   1343 
   1344         cv::Mat dst_gold;
   1345         cv::divide(scale, mat, dst_gold, depth.second);
   1346 
   1347         EXPECT_MAT_NEAR(dst_gold, dst, depth.first >= CV_32F || depth.second >= CV_32F ? 1e-4 : 1.0);
   1348     }
   1349 }
   1350 
// Run scalar/mat tests on every device, size, depth pair and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Divide_Scalar_First, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    DEPTH_PAIRS,
    WHOLE_SUBMAT));
   1356 
   1357 ////////////////////////////////////////////////////////////////////////////////
   1358 // AbsDiff
   1359 
// Fixture for absdiff tests: parameterized over device, size, a single
// matrix depth and ROI usage.
PARAM_TEST_CASE(AbsDiff, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1377 
   1378 CUDA_TEST_P(AbsDiff, Array)
   1379 {
   1380     cv::Mat src1 = randomMat(size, depth);
   1381     cv::Mat src2 = randomMat(size, depth);
   1382 
   1383     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1384     {
   1385         try
   1386         {
   1387             cv::cuda::GpuMat dst;
   1388             cv::cuda::absdiff(loadMat(src1), loadMat(src2), dst);
   1389         }
   1390         catch (const cv::Exception& e)
   1391         {
   1392             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1393         }
   1394     }
   1395     else
   1396     {
   1397         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1398         cv::cuda::absdiff(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
   1399 
   1400         cv::Mat dst_gold;
   1401         cv::absdiff(src1, src2, dst_gold);
   1402 
   1403         EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   1404     }
   1405 }
   1406 
   1407 CUDA_TEST_P(AbsDiff, Scalar)
   1408 {
   1409     cv::Mat src = randomMat(size, depth);
   1410     cv::Scalar val = randomScalar(0.0, 255.0);
   1411 
   1412     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1413     {
   1414         try
   1415         {
   1416             cv::cuda::GpuMat dst;
   1417             cv::cuda::absdiff(loadMat(src), val, dst);
   1418         }
   1419         catch (const cv::Exception& e)
   1420         {
   1421             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1422         }
   1423     }
   1424     else
   1425     {
   1426         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1427         cv::cuda::absdiff(loadMat(src, useRoi), val, dst);
   1428 
   1429         cv::Mat dst_gold;
   1430         cv::absdiff(src, val, dst_gold);
   1431 
   1432         EXPECT_MAT_NEAR(dst_gold, dst, depth <= CV_32F ? 1.0 : 1e-5);
   1433     }
   1434 }
   1435 
   1436 CUDA_TEST_P(AbsDiff, Scalar_First)
   1437 {
   1438     cv::Mat src = randomMat(size, depth);
   1439     cv::Scalar val = randomScalar(0.0, 255.0);
   1440 
   1441     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1442     {
   1443         try
   1444         {
   1445             cv::cuda::GpuMat dst;
   1446             cv::cuda::absdiff(val, loadMat(src), dst);
   1447         }
   1448         catch (const cv::Exception& e)
   1449         {
   1450             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1451         }
   1452     }
   1453     else
   1454     {
   1455         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1456         cv::cuda::absdiff(val, loadMat(src, useRoi), dst);
   1457 
   1458         cv::Mat dst_gold;
   1459         cv::absdiff(val, src, dst_gold);
   1460 
   1461         EXPECT_MAT_NEAR(dst_gold, dst, depth <= CV_32F ? 1.0 : 1e-5);
   1462     }
   1463 }
   1464 
// Run absdiff tests on every device, size, depth and ROI mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, AbsDiff, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));
   1470 
   1471 ////////////////////////////////////////////////////////////////////////////////
   1472 // Abs
   1473 
// Fixture for cv::cuda::abs tests: parameterized over device, size,
// matrix depth and ROI usage.
PARAM_TEST_CASE(Abs, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1491 
   1492 CUDA_TEST_P(Abs, Accuracy)
   1493 {
   1494     cv::Mat src = randomMat(size, depth);
   1495 
   1496     cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1497     cv::cuda::abs(loadMat(src, useRoi), dst);
   1498 
   1499     cv::Mat dst_gold = cv::abs(src);
   1500 
   1501     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   1502 }
   1503 
// abs is only implemented for CV_16S and CV_32F depths.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Abs, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_16S), MatDepth(CV_32F)),
    WHOLE_SUBMAT));
   1509 
   1510 ////////////////////////////////////////////////////////////////////////////////
   1511 // Sqr
   1512 
// Fixture for cv::cuda::sqr tests: parameterized over device, size,
// matrix depth and ROI usage.
PARAM_TEST_CASE(Sqr, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1530 
   1531 CUDA_TEST_P(Sqr, Accuracy)
   1532 {
   1533     cv::Mat src = randomMat(size, depth, 0, depth == CV_8U ? 16 : 255);
   1534 
   1535     cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1536     cv::cuda::sqr(loadMat(src, useRoi), dst);
   1537 
   1538     cv::Mat dst_gold;
   1539     cv::multiply(src, src, dst_gold);
   1540 
   1541     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   1542 }
   1543 
// Run sqr tests on the depths it supports (8U/16U/16S/32F).
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sqr, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32F)),
    WHOLE_SUBMAT));
   1552 
   1553 ////////////////////////////////////////////////////////////////////////////////
   1554 // Sqrt
   1555 
   1556 namespace
   1557 {
   1558     template <typename T> void sqrtImpl(const cv::Mat& src, cv::Mat& dst)
   1559     {
   1560         dst.create(src.size(), src.type());
   1561 
   1562         for (int y = 0; y < src.rows; ++y)
   1563         {
   1564             for (int x = 0; x < src.cols; ++x)
   1565                 dst.at<T>(y, x) = static_cast<T>(std::sqrt(static_cast<float>(src.at<T>(y, x))));
   1566         }
   1567     }
   1568 
   1569     void sqrtGold(const cv::Mat& src, cv::Mat& dst)
   1570     {
   1571         typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
   1572 
   1573         const func_t funcs[] =
   1574         {
   1575             sqrtImpl<uchar>, sqrtImpl<schar>, sqrtImpl<ushort>, sqrtImpl<short>,
   1576             sqrtImpl<int>, sqrtImpl<float>
   1577         };
   1578 
   1579         funcs[src.depth()](src, dst);
   1580     }
   1581 }
   1582 
// Fixture for cv::cuda::sqrt tests: parameterized over device, size,
// matrix depth and ROI usage.
PARAM_TEST_CASE(Sqrt, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1600 
   1601 CUDA_TEST_P(Sqrt, Accuracy)
   1602 {
   1603     cv::Mat src = randomMat(size, depth);
   1604 
   1605     cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1606     cv::cuda::sqrt(loadMat(src, useRoi), dst);
   1607 
   1608     cv::Mat dst_gold;
   1609     sqrtGold(src, dst_gold);
   1610 
   1611     EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
   1612 }
   1613 
// Run sqrt tests on the depths it supports (8U/16U/16S/32F).
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sqrt, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32F)),
    WHOLE_SUBMAT));
   1622 
   1623 ////////////////////////////////////////////////////////////////////////////////
   1624 // Log
   1625 
   1626 namespace
   1627 {
   1628     template <typename T> void logImpl(const cv::Mat& src, cv::Mat& dst)
   1629     {
   1630         dst.create(src.size(), src.type());
   1631 
   1632         for (int y = 0; y < src.rows; ++y)
   1633         {
   1634             for (int x = 0; x < src.cols; ++x)
   1635                 dst.at<T>(y, x) = static_cast<T>(std::log(static_cast<float>(src.at<T>(y, x))));
   1636         }
   1637     }
   1638 
   1639     void logGold(const cv::Mat& src, cv::Mat& dst)
   1640     {
   1641         typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
   1642 
   1643         const func_t funcs[] =
   1644         {
   1645             logImpl<uchar>, logImpl<schar>, logImpl<ushort>, logImpl<short>,
   1646             logImpl<int>, logImpl<float>
   1647         };
   1648 
   1649         funcs[src.depth()](src, dst);
   1650     }
   1651 }
   1652 
// Fixture for cv::cuda::log tests: parameterized over device, size,
// matrix depth and ROI usage.
PARAM_TEST_CASE(Log, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Direct all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1670 
   1671 CUDA_TEST_P(Log, Accuracy)
   1672 {
   1673     cv::Mat src = randomMat(size, depth, 1.0, 255.0);
   1674 
   1675     cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1676     cv::cuda::log(loadMat(src, useRoi), dst);
   1677 
   1678     cv::Mat dst_gold;
   1679     logGold(src, dst_gold);
   1680 
   1681     EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-6);
   1682 }
   1683 
// Run log tests on the depths it supports (8U/16U/16S/32F).
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Log, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32F)),
    WHOLE_SUBMAT));
   1692 
   1693 ////////////////////////////////////////////////////////////////////////////////
   1694 // Exp
   1695 
   1696 namespace
   1697 {
   1698     template <typename T> void expImpl(const cv::Mat& src, cv::Mat& dst)
   1699     {
   1700         dst.create(src.size(), src.type());
   1701 
   1702         for (int y = 0; y < src.rows; ++y)
   1703         {
   1704             for (int x = 0; x < src.cols; ++x)
   1705                 dst.at<T>(y, x) = cv::saturate_cast<T>(static_cast<int>(std::exp(static_cast<float>(src.at<T>(y, x)))));
   1706         }
   1707     }
   1708     void expImpl_float(const cv::Mat& src, cv::Mat& dst)
   1709     {
   1710         dst.create(src.size(), src.type());
   1711 
   1712         for (int y = 0; y < src.rows; ++y)
   1713         {
   1714             for (int x = 0; x < src.cols; ++x)
   1715                 dst.at<float>(y, x) = std::exp(static_cast<float>(src.at<float>(y, x)));
   1716         }
   1717     }
   1718 
   1719     void expGold(const cv::Mat& src, cv::Mat& dst)
   1720     {
   1721         typedef void (*func_t)(const cv::Mat& src, cv::Mat& dst);
   1722 
   1723         const func_t funcs[] =
   1724         {
   1725             expImpl<uchar>, expImpl<schar>, expImpl<ushort>, expImpl<short>,
   1726             expImpl<int>, expImpl_float
   1727         };
   1728 
   1729         funcs[src.depth()](src, dst);
   1730     }
   1731 }
   1732 
// Fixture for cv::cuda::exp tests.
// Parameters: (device, matrix size, source depth, whole-matrix vs. ROI).
PARAM_TEST_CASE(Exp, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // source/destination depth
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1750 
   1751 CUDA_TEST_P(Exp, Accuracy)
   1752 {
   1753     cv::Mat src = randomMat(size, depth, 0.0, 10.0);
   1754 
   1755     cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1756     cv::cuda::exp(loadMat(src, useRoi), dst);
   1757 
   1758     cv::Mat dst_gold;
   1759     expGold(src, dst_gold);
   1760 
   1761     EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-2);
   1762 }
   1763 
// Instantiate Exp over all devices and sizes; depths stop at CV_32F because
// expGold's dispatch table has no double implementation.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Exp, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32F)),
    WHOLE_SUBMAT));
   1772 
   1773 ////////////////////////////////////////////////////////////////////////////////
   1774 // Pow
   1775 
// Fixture for cv::cuda::pow tests.
// Parameters: (device, matrix size, source depth, whole-matrix vs. ROI).
PARAM_TEST_CASE(Pow, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // source/destination depth
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1793 
   1794 CUDA_TEST_P(Pow, Accuracy)
   1795 {
   1796     cv::Mat src = randomMat(size, depth, 0.0, 10.0);
   1797     double power = randomDouble(2.0, 4.0);
   1798 
   1799     if (src.depth() < CV_32F)
   1800         power = static_cast<int>(power);
   1801 
   1802     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1803     {
   1804         try
   1805         {
   1806             cv::cuda::GpuMat dst;
   1807             cv::cuda::pow(loadMat(src), power, dst);
   1808         }
   1809         catch (const cv::Exception& e)
   1810         {
   1811             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1812         }
   1813     }
   1814     else
   1815     {
   1816         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   1817         cv::cuda::pow(loadMat(src, useRoi), power, dst);
   1818 
   1819         cv::Mat dst_gold;
   1820         cv::pow(src, power, dst_gold);
   1821 
   1822         EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 0.0 : 1e-1);
   1823     }
   1824 }
   1825 
// Instantiate Pow over all devices, sizes and depths (including CV_64F,
// which exercises the unsupported-format path on non-double devices).
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Pow, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));
   1831 
   1832 ////////////////////////////////////////////////////////////////////////////////
   1833 // Compare_Array
   1834 
// Printable wrapper for cv::CMP_* codes and the full set of comparison ops
// used to parameterize the Compare_* tests below.
CV_ENUM(CmpCode, cv::CMP_EQ, cv::CMP_GT, cv::CMP_GE, cv::CMP_LT, cv::CMP_LE, cv::CMP_NE)
#define ALL_CMP_CODES testing::Values(CmpCode(cv::CMP_EQ), CmpCode(cv::CMP_NE), CmpCode(cv::CMP_GT), CmpCode(cv::CMP_GE), CmpCode(cv::CMP_LT), CmpCode(cv::CMP_LE))
   1837 
// Fixture for cv::cuda::compare on two arrays.
// Parameters: (device, size, depth, comparison op, whole-matrix vs. ROI).
PARAM_TEST_CASE(Compare_Array, cv::cuda::DeviceInfo, cv::Size, MatDepth, CmpCode, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // source depth
    int cmp_code;                  // one of cv::CMP_*
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        cmp_code = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1857 
   1858 CUDA_TEST_P(Compare_Array, Accuracy)
   1859 {
   1860     cv::Mat src1 = randomMat(size, depth);
   1861     cv::Mat src2 = randomMat(size, depth);
   1862 
   1863     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1864     {
   1865         try
   1866         {
   1867             cv::cuda::GpuMat dst;
   1868             cv::cuda::compare(loadMat(src1), loadMat(src2), dst, cmp_code);
   1869         }
   1870         catch (const cv::Exception& e)
   1871         {
   1872             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1873         }
   1874     }
   1875     else
   1876     {
   1877         cv::cuda::GpuMat dst = createMat(size, CV_8UC1, useRoi);
   1878         cv::cuda::compare(loadMat(src1, useRoi), loadMat(src2, useRoi), dst, cmp_code);
   1879 
   1880         cv::Mat dst_gold;
   1881         cv::compare(src1, src2, dst_gold, cmp_code);
   1882 
   1883         EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   1884     }
   1885 }
   1886 
// Instantiate Compare_Array over every depth and comparison operator.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Compare_Array, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    ALL_CMP_CODES,
    WHOLE_SUBMAT));
   1893 
   1894 ////////////////////////////////////////////////////////////////////////////////
   1895 // Compare_Scalar
   1896 
namespace
{
    // Gold (CPU reference) per-element comparison of a matrix against a
    // scalar. Op is a std:: comparison functor; the output is an 8-bit mask
    // (255 where the predicate holds, 0 elsewhere), one channel per input
    // channel. The scalar is saturated to the source type before comparing,
    // matching the CUDA implementation.
    template <template <typename> class Op, typename T>
    void compareScalarImpl(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst)
    {
        Op<T> op;

        const int cn = src.channels();

        dst.create(src.size(), CV_MAKE_TYPE(CV_8U, cn));

        for (int y = 0; y < src.rows; ++y)
        {
            for (int x = 0; x < src.cols; ++x)
            {
                for (int c = 0; c < cn; ++c)
                {
                    T src_val = src.at<T>(y, x * cn + c);
                    T sc_val = cv::saturate_cast<T>(sc.val[c]);
                    // bool -> int -> 0/255 mask value
                    dst.at<uchar>(y, x * cn + c) = static_cast<uchar>(static_cast<int>(op(src_val, sc_val)) * 255);
                }
            }
        }
    }

    // Dispatch table: rows are indexed by src.depth() (CV_8U..CV_64F),
    // columns by cmpop. The column order mirrors the numeric values of
    // cv::CMP_EQ(0), CMP_GT(1), CMP_GE(2), CMP_LT(3), CMP_LE(4), CMP_NE(5)
    // — do not reorder.
    void compareScalarGold(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst, int cmpop)
    {
        typedef void (*func_t)(const cv::Mat& src, cv::Scalar sc, cv::Mat& dst);
        static const func_t funcs[7][6] =
        {
            {compareScalarImpl<std::equal_to, unsigned char> , compareScalarImpl<std::greater, unsigned char> , compareScalarImpl<std::greater_equal, unsigned char> , compareScalarImpl<std::less, unsigned char> , compareScalarImpl<std::less_equal, unsigned char> , compareScalarImpl<std::not_equal_to, unsigned char> },
            {compareScalarImpl<std::equal_to, signed char>   , compareScalarImpl<std::greater, signed char>   , compareScalarImpl<std::greater_equal, signed char>   , compareScalarImpl<std::less, signed char>   , compareScalarImpl<std::less_equal, signed char>   , compareScalarImpl<std::not_equal_to, signed char>   },
            {compareScalarImpl<std::equal_to, unsigned short>, compareScalarImpl<std::greater, unsigned short>, compareScalarImpl<std::greater_equal, unsigned short>, compareScalarImpl<std::less, unsigned short>, compareScalarImpl<std::less_equal, unsigned short>, compareScalarImpl<std::not_equal_to, unsigned short>},
            {compareScalarImpl<std::equal_to, short>         , compareScalarImpl<std::greater, short>         , compareScalarImpl<std::greater_equal, short>         , compareScalarImpl<std::less, short>         , compareScalarImpl<std::less_equal, short>         , compareScalarImpl<std::not_equal_to, short>         },
            {compareScalarImpl<std::equal_to, int>           , compareScalarImpl<std::greater, int>           , compareScalarImpl<std::greater_equal, int>           , compareScalarImpl<std::less, int>           , compareScalarImpl<std::less_equal, int>           , compareScalarImpl<std::not_equal_to, int>           },
            {compareScalarImpl<std::equal_to, float>         , compareScalarImpl<std::greater, float>         , compareScalarImpl<std::greater_equal, float>         , compareScalarImpl<std::less, float>         , compareScalarImpl<std::less_equal, float>         , compareScalarImpl<std::not_equal_to, float>         },
            {compareScalarImpl<std::equal_to, double>        , compareScalarImpl<std::greater, double>        , compareScalarImpl<std::greater_equal, double>        , compareScalarImpl<std::less, double>        , compareScalarImpl<std::less_equal, double>        , compareScalarImpl<std::not_equal_to, double>        }
        };

        funcs[src.depth()][cmpop](src, sc, dst);
    }
}
   1939 
// Fixture for cv::cuda::compare against a scalar.
// Parameters: (device, size, full mat type, comparison op, whole vs. ROI).
PARAM_TEST_CASE(Compare_Scalar, cv::cuda::DeviceInfo, cv::Size, MatType, CmpCode, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int type;                      // full matrix type (depth + channels)
    int cmp_code;                  // one of cv::CMP_*
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        type = GET_PARAM(2);
        cmp_code = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   1959 
   1960 CUDA_TEST_P(Compare_Scalar, Accuracy)
   1961 {
   1962     cv::Mat src = randomMat(size, type);
   1963     cv::Scalar sc = randomScalar(0.0, 255.0);
   1964 
   1965     if (src.depth() < CV_32F)
   1966     {
   1967         sc.val[0] = cvRound(sc.val[0]);
   1968         sc.val[1] = cvRound(sc.val[1]);
   1969         sc.val[2] = cvRound(sc.val[2]);
   1970         sc.val[3] = cvRound(sc.val[3]);
   1971     }
   1972 
   1973     if (src.depth() == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   1974     {
   1975         try
   1976         {
   1977             cv::cuda::GpuMat dst;
   1978             cv::cuda::compare(loadMat(src), sc, dst, cmp_code);
   1979         }
   1980         catch (const cv::Exception& e)
   1981         {
   1982             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   1983         }
   1984     }
   1985     else
   1986     {
   1987         cv::cuda::GpuMat dst = createMat(size, CV_MAKE_TYPE(CV_8U, src.channels()), useRoi);
   1988 
   1989         cv::cuda::compare(loadMat(src, useRoi), sc, dst, cmp_code);
   1990 
   1991         cv::Mat dst_gold;
   1992         compareScalarGold(src, sc, dst_gold, cmp_code);
   1993 
   1994         EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   1995     }
   1996 }
   1997 
// Instantiate Compare_Scalar over all depths (CV_8U..CV_64F) crossed with
// 1..4 channels and every comparison operator.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Compare_Scalar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    TYPES(CV_8U, CV_64F, 1, 4),
    ALL_CMP_CODES,
    WHOLE_SUBMAT));
   2004 
   2005 //////////////////////////////////////////////////////////////////////////////
   2006 // Bitwise_Array
   2007 
// Fixture for bitwise ops on two arrays. Unlike the other fixtures, the
// random inputs are generated once in SetUp and shared by all four tests.
PARAM_TEST_CASE(Bitwise_Array, cv::cuda::DeviceInfo, cv::Size, MatType)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int type;                      // full matrix type (depth + channels)

    cv::Mat src1;                  // random operand A
    cv::Mat src2;                  // random operand B

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        type = GET_PARAM(2);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());

        src1 = randomMat(size, type, 0.0, std::numeric_limits<int>::max());
        src2 = randomMat(size, type, 0.0, std::numeric_limits<int>::max());
    }
};
   2029 
   2030 CUDA_TEST_P(Bitwise_Array, Not)
   2031 {
   2032     cv::cuda::GpuMat dst;
   2033     cv::cuda::bitwise_not(loadMat(src1), dst);
   2034 
   2035     cv::Mat dst_gold = ~src1;
   2036 
   2037     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2038 }
   2039 
   2040 CUDA_TEST_P(Bitwise_Array, Or)
   2041 {
   2042     cv::cuda::GpuMat dst;
   2043     cv::cuda::bitwise_or(loadMat(src1), loadMat(src2), dst);
   2044 
   2045     cv::Mat dst_gold = src1 | src2;
   2046 
   2047     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2048 }
   2049 
   2050 CUDA_TEST_P(Bitwise_Array, And)
   2051 {
   2052     cv::cuda::GpuMat dst;
   2053     cv::cuda::bitwise_and(loadMat(src1), loadMat(src2), dst);
   2054 
   2055     cv::Mat dst_gold = src1 & src2;
   2056 
   2057     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2058 }
   2059 
   2060 CUDA_TEST_P(Bitwise_Array, Xor)
   2061 {
   2062     cv::cuda::GpuMat dst;
   2063     cv::cuda::bitwise_xor(loadMat(src1), loadMat(src2), dst);
   2064 
   2065     cv::Mat dst_gold = src1 ^ src2;
   2066 
   2067     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2068 }
   2069 
// Instantiate Bitwise_Array for integer types only (CV_8U..CV_32S, 1..4ch);
// bitwise ops are undefined for floating-point matrices.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Bitwise_Array, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    TYPES(CV_8U, CV_32S, 1, 4)));
   2074 
   2075 //////////////////////////////////////////////////////////////////////////////
   2076 // Bitwise_Scalar
   2077 
// Fixture for bitwise ops between an array and a scalar. The random matrix
// and scalar are generated once in SetUp and shared by the three tests.
PARAM_TEST_CASE(Bitwise_Scalar, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // element depth
    int channels;                  // channel count

    cv::Mat src;                   // random input matrix
    cv::Scalar val;                // random integer-valued scalar operand

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());

        src = randomMat(size, CV_MAKE_TYPE(depth, channels));
        // Generate as integers so bitwise ops against the scalar are exact.
        cv::Scalar_<int> ival = randomScalar(0.0, std::numeric_limits<int>::max());
        val = ival;
    }
};
   2102 
   2103 CUDA_TEST_P(Bitwise_Scalar, Or)
   2104 {
   2105     cv::cuda::GpuMat dst;
   2106     cv::cuda::bitwise_or(loadMat(src), val, dst);
   2107 
   2108     cv::Mat dst_gold;
   2109     cv::bitwise_or(src, val, dst_gold);
   2110 
   2111     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2112 }
   2113 
   2114 CUDA_TEST_P(Bitwise_Scalar, And)
   2115 {
   2116     cv::cuda::GpuMat dst;
   2117     cv::cuda::bitwise_and(loadMat(src), val, dst);
   2118 
   2119     cv::Mat dst_gold;
   2120     cv::bitwise_and(src, val, dst_gold);
   2121 
   2122     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2123 }
   2124 
   2125 CUDA_TEST_P(Bitwise_Scalar, Xor)
   2126 {
   2127     cv::cuda::GpuMat dst;
   2128     cv::cuda::bitwise_xor(loadMat(src), val, dst);
   2129 
   2130     cv::Mat dst_gold;
   2131     cv::bitwise_xor(src, val, dst_gold);
   2132 
   2133     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2134 }
   2135 
// Instantiate Bitwise_Scalar for the unsigned/int depths supported by the
// scalar overloads, crossed with all channel counts.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Bitwise_Scalar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
    IMAGE_CHANNELS));
   2141 
   2142 //////////////////////////////////////////////////////////////////////////////
   2143 // RShift
   2144 
   2145 namespace
   2146 {
   2147     template <typename T> void rhiftImpl(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
   2148     {
   2149         const int cn = src.channels();
   2150 
   2151         dst.create(src.size(), src.type());
   2152 
   2153         for (int y = 0; y < src.rows; ++y)
   2154         {
   2155             for (int x = 0; x < src.cols; ++x)
   2156             {
   2157                 for (int c = 0; c < cn; ++c)
   2158                     dst.at<T>(y, x * cn + c) = src.at<T>(y, x * cn + c) >> val.val[c];
   2159             }
   2160         }
   2161     }
   2162 
   2163     void rhiftGold(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
   2164     {
   2165         typedef void (*func_t)(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst);
   2166 
   2167         const func_t funcs[] =
   2168         {
   2169             rhiftImpl<uchar>, rhiftImpl<schar>, rhiftImpl<ushort>, rhiftImpl<short>, rhiftImpl<int>
   2170         };
   2171 
   2172         funcs[src.depth()](src, val, dst);
   2173     }
   2174 }
   2175 
// Fixture for cv::cuda::rshift tests.
// Parameters: (device, size, depth, channels, whole-matrix vs. ROI).
PARAM_TEST_CASE(RShift, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // element depth
    int channels;                  // channel count
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2195 
   2196 CUDA_TEST_P(RShift, Accuracy)
   2197 {
   2198     int type = CV_MAKE_TYPE(depth, channels);
   2199     cv::Mat src = randomMat(size, type);
   2200     cv::Scalar_<int> val = randomScalar(0.0, 8.0);
   2201 
   2202     cv::cuda::GpuMat dst = createMat(size, type, useRoi);
   2203     cv::cuda::rshift(loadMat(src, useRoi), val, dst);
   2204 
   2205     cv::Mat dst_gold;
   2206     rhiftGold(src, val, dst_gold);
   2207 
   2208     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2209 }
   2210 
// Instantiate RShift for all integer depths (signed included, since an
// arithmetic right shift is meaningful for them).
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, RShift, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_8S),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32S)),
    IMAGE_CHANNELS,
    WHOLE_SUBMAT));
   2221 
   2222 //////////////////////////////////////////////////////////////////////////////
   2223 // LShift
   2224 
   2225 namespace
   2226 {
   2227     template <typename T> void lhiftImpl(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
   2228     {
   2229         const int cn = src.channels();
   2230 
   2231         dst.create(src.size(), src.type());
   2232 
   2233         for (int y = 0; y < src.rows; ++y)
   2234         {
   2235             for (int x = 0; x < src.cols; ++x)
   2236             {
   2237                 for (int c = 0; c < cn; ++c)
   2238                     dst.at<T>(y, x * cn + c) = src.at<T>(y, x * cn + c) << val.val[c];
   2239             }
   2240         }
   2241     }
   2242 
   2243     void lhiftGold(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst)
   2244     {
   2245         typedef void (*func_t)(const cv::Mat& src, cv::Scalar_<int> val, cv::Mat& dst);
   2246 
   2247         const func_t funcs[] =
   2248         {
   2249             lhiftImpl<uchar>, lhiftImpl<schar>, lhiftImpl<ushort>, lhiftImpl<short>, lhiftImpl<int>
   2250         };
   2251 
   2252         funcs[src.depth()](src, val, dst);
   2253     }
   2254 }
   2255 
// Fixture for cv::cuda::lshift tests.
// Parameters: (device, size, depth, channels, whole-matrix vs. ROI).
PARAM_TEST_CASE(LShift, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // element depth
    int channels;                  // channel count
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2275 
   2276 CUDA_TEST_P(LShift, Accuracy)
   2277 {
   2278     int type = CV_MAKE_TYPE(depth, channels);
   2279     cv::Mat src = randomMat(size, type);
   2280     cv::Scalar_<int> val = randomScalar(0.0, 8.0);
   2281 
   2282     cv::cuda::GpuMat dst = createMat(size, type, useRoi);
   2283     cv::cuda::lshift(loadMat(src, useRoi), val, dst);
   2284 
   2285     cv::Mat dst_gold;
   2286     lhiftGold(src, val, dst_gold);
   2287 
   2288     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2289 }
   2290 
// Instantiate LShift for unsigned/int depths only (left-shifting signed
// values can overflow, so signed depths are not exercised here).
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, LShift, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32S)),
    IMAGE_CHANNELS,
    WHOLE_SUBMAT));
   2297 
   2298 //////////////////////////////////////////////////////////////////////////////
   2299 // Min
   2300 
// Fixture for cv::cuda::min tests.
// Parameters: (device, size, depth, whole-matrix vs. ROI).
PARAM_TEST_CASE(Min, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // element depth
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2318 
   2319 CUDA_TEST_P(Min, Array)
   2320 {
   2321     cv::Mat src1 = randomMat(size, depth);
   2322     cv::Mat src2 = randomMat(size, depth);
   2323 
   2324     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   2325     {
   2326         try
   2327         {
   2328             cv::cuda::GpuMat dst;
   2329             cv::cuda::min(loadMat(src1), loadMat(src2), dst);
   2330         }
   2331         catch (const cv::Exception& e)
   2332         {
   2333             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   2334         }
   2335     }
   2336     else
   2337     {
   2338         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   2339         cv::cuda::min(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
   2340 
   2341         cv::Mat dst_gold = cv::min(src1, src2);
   2342 
   2343         EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2344     }
   2345 }
   2346 
   2347 CUDA_TEST_P(Min, Scalar)
   2348 {
   2349     cv::Mat src = randomMat(size, depth);
   2350     double val = randomDouble(0.0, 255.0);
   2351 
   2352     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   2353     {
   2354         try
   2355         {
   2356             cv::cuda::GpuMat dst;
   2357             cv::cuda::min(loadMat(src), val, dst);
   2358         }
   2359         catch (const cv::Exception& e)
   2360         {
   2361             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   2362         }
   2363     }
   2364     else
   2365     {
   2366         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   2367         cv::cuda::min(loadMat(src, useRoi), val, dst);
   2368 
   2369         cv::Mat dst_gold = cv::min(src, val);
   2370 
   2371         EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
   2372     }
   2373 }
   2374 
// Instantiate Min over all devices, sizes and depths.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Min, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));
   2380 
   2381 //////////////////////////////////////////////////////////////////////////////
   2382 // Max
   2383 
// Fixture for cv::cuda::max tests.
// Parameters: (device, size, depth, whole-matrix vs. ROI).
PARAM_TEST_CASE(Max, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth;                     // element depth
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2401 
   2402 CUDA_TEST_P(Max, Array)
   2403 {
   2404     cv::Mat src1 = randomMat(size, depth);
   2405     cv::Mat src2 = randomMat(size, depth);
   2406 
   2407     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   2408     {
   2409         try
   2410         {
   2411             cv::cuda::GpuMat dst;
   2412             cv::cuda::max(loadMat(src1), loadMat(src2), dst);
   2413         }
   2414         catch (const cv::Exception& e)
   2415         {
   2416             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   2417         }
   2418     }
   2419     else
   2420     {
   2421         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   2422         cv::cuda::max(loadMat(src1, useRoi), loadMat(src2, useRoi), dst);
   2423 
   2424         cv::Mat dst_gold = cv::max(src1, src2);
   2425 
   2426         EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2427     }
   2428 }
   2429 
   2430 CUDA_TEST_P(Max, Scalar)
   2431 {
   2432     cv::Mat src = randomMat(size, depth);
   2433     double val = randomDouble(0.0, 255.0);
   2434 
   2435     if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   2436     {
   2437         try
   2438         {
   2439             cv::cuda::GpuMat dst;
   2440             cv::cuda::max(loadMat(src), val, dst);
   2441         }
   2442         catch (const cv::Exception& e)
   2443         {
   2444             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   2445         }
   2446     }
   2447     else
   2448     {
   2449         cv::cuda::GpuMat dst = createMat(size, depth, useRoi);
   2450         cv::cuda::max(loadMat(src, useRoi), val, dst);
   2451 
   2452         cv::Mat dst_gold = cv::max(src, val);
   2453 
   2454         EXPECT_MAT_NEAR(dst_gold, dst, depth < CV_32F ? 1.0 : 1e-5);
   2455     }
   2456 }
   2457 
// Instantiate Max over all devices, sizes and depths.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Max, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));
   2463 
   2464 //////////////////////////////////////////////////////////////////////////////
   2465 // AddWeighted
   2466 
// Fixture for cv::cuda::addWeighted tests.
// Parameters: (device, size, depth of src1, depth of src2, output depth,
// whole-matrix vs. ROI) — mixed-depth combinations are exercised.
PARAM_TEST_CASE(AddWeighted, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int depth1;                    // depth of the first operand
    int depth2;                    // depth of the second operand
    int dst_depth;                 // requested output depth
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth1 = GET_PARAM(2);
        depth2 = GET_PARAM(3);
        dst_depth = GET_PARAM(4);
        useRoi = GET_PARAM(5);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2488 
   2489 CUDA_TEST_P(AddWeighted, Accuracy)
   2490 {
   2491     cv::Mat src1 = randomMat(size, depth1);
   2492     cv::Mat src2 = randomMat(size, depth2);
   2493     double alpha = randomDouble(-10.0, 10.0);
   2494     double beta = randomDouble(-10.0, 10.0);
   2495     double gamma = randomDouble(-10.0, 10.0);
   2496 
   2497     if ((depth1 == CV_64F || depth2 == CV_64F || dst_depth == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
   2498     {
   2499         try
   2500         {
   2501             cv::cuda::GpuMat dst;
   2502             cv::cuda::addWeighted(loadMat(src1), alpha, loadMat(src2), beta, gamma, dst, dst_depth);
   2503         }
   2504         catch (const cv::Exception& e)
   2505         {
   2506             ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
   2507         }
   2508     }
   2509     else
   2510     {
   2511         cv::cuda::GpuMat dst = createMat(size, dst_depth, useRoi);
   2512         cv::cuda::addWeighted(loadMat(src1, useRoi), alpha, loadMat(src2, useRoi), beta, gamma, dst, dst_depth);
   2513 
   2514         cv::Mat dst_gold;
   2515         cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dst_depth);
   2516 
   2517         EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 2.0 : 1e-3);
   2518     }
   2519 }
   2520 
// Instantiate AddWeighted over every (src1, src2, dst) depth combination.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, AddWeighted, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    ALL_DEPTH,
    ALL_DEPTH,
    WHOLE_SUBMAT));
   2528 
   2529 ///////////////////////////////////////////////////////////////////////////////////////////////////////
   2530 // Threshold
   2531 
// Printable wrapper for cv::THRESH_* modes and the full set of threshold
// operations used to parameterize the Threshold test below.
CV_ENUM(ThreshOp, cv::THRESH_BINARY, cv::THRESH_BINARY_INV, cv::THRESH_TRUNC, cv::THRESH_TOZERO, cv::THRESH_TOZERO_INV)
#define ALL_THRESH_OPS testing::Values(ThreshOp(cv::THRESH_BINARY), ThreshOp(cv::THRESH_BINARY_INV), ThreshOp(cv::THRESH_TRUNC), ThreshOp(cv::THRESH_TOZERO), ThreshOp(cv::THRESH_TOZERO_INV))
   2534 
// Fixture for cv::cuda::threshold tests.
// Parameters: (device, size, full mat type, threshold mode, whole vs. ROI).
PARAM_TEST_CASE(Threshold, cv::cuda::DeviceInfo, cv::Size, MatType, ThreshOp, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    int type;                      // full matrix type (depth + channels)
    int threshOp;                  // one of cv::THRESH_*
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        type = GET_PARAM(2);
        threshOp = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2554 
   2555 CUDA_TEST_P(Threshold, Accuracy)
   2556 {
   2557     cv::Mat src = randomMat(size, type);
   2558     double maxVal = randomDouble(20.0, 127.0);
   2559     double thresh = randomDouble(0.0, maxVal);
   2560 
   2561     cv::cuda::GpuMat dst = createMat(src.size(), src.type(), useRoi);
   2562     cv::cuda::threshold(loadMat(src, useRoi), dst, thresh, maxVal, threshOp);
   2563 
   2564     cv::Mat dst_gold;
   2565     cv::threshold(src, dst_gold, thresh, maxVal, threshOp);
   2566 
   2567     EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
   2568 }
   2569 
// Instantiate Threshold for the single-channel types the CUDA kernel
// supports, crossed with every threshold mode.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Threshold, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatType(CV_8UC1), MatType(CV_16SC1), MatType(CV_32FC1)),
    ALL_THRESH_OPS,
    WHOLE_SUBMAT));
   2576 
   2577 ////////////////////////////////////////////////////////////////////////////////
   2578 // Magnitude
   2579 
// Fixture for cv::cuda::magnitude / magnitudeSqr tests (always CV_32F).
// Parameters: (device, size, whole-matrix vs. ROI).
PARAM_TEST_CASE(Magnitude, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix size
    bool useRoi;                   // true: operate on a sub-matrix (ROI)

    virtual void SetUp()
    {
        // Unpack the test parameters.
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        // Bind all subsequent CUDA calls to the device under test.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2595 
   2596 CUDA_TEST_P(Magnitude, NPP)
   2597 {
   2598     cv::Mat src = randomMat(size, CV_32FC2);
   2599 
   2600     cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
   2601     cv::cuda::magnitude(loadMat(src, useRoi), dst);
   2602 
   2603     cv::Mat arr[2];
   2604     cv::split(src, arr);
   2605     cv::Mat dst_gold;
   2606     cv::magnitude(arr[0], arr[1], dst_gold);
   2607 
   2608     EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
   2609 }
   2610 
   2611 CUDA_TEST_P(Magnitude, Sqr_NPP)
   2612 {
   2613     cv::Mat src = randomMat(size, CV_32FC2);
   2614 
   2615     cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
   2616     cv::cuda::magnitudeSqr(loadMat(src, useRoi), dst);
   2617 
   2618     cv::Mat arr[2];
   2619     cv::split(src, arr);
   2620     cv::Mat dst_gold;
   2621     cv::magnitude(arr[0], arr[1], dst_gold);
   2622     cv::multiply(dst_gold, dst_gold, dst_gold);
   2623 
   2624     EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
   2625 }
   2626 
   2627 CUDA_TEST_P(Magnitude, Accuracy)
   2628 {
   2629     cv::Mat x = randomMat(size, CV_32FC1);
   2630     cv::Mat y = randomMat(size, CV_32FC1);
   2631 
   2632     cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
   2633     cv::cuda::magnitude(loadMat(x, useRoi), loadMat(y, useRoi), dst);
   2634 
   2635     cv::Mat dst_gold;
   2636     cv::magnitude(x, y, dst_gold);
   2637 
   2638     EXPECT_MAT_NEAR(dst_gold, dst, 1e-4);
   2639 }
   2640 
   2641 CUDA_TEST_P(Magnitude, Sqr_Accuracy)
   2642 {
   2643     cv::Mat x = randomMat(size, CV_32FC1);
   2644     cv::Mat y = randomMat(size, CV_32FC1);
   2645 
   2646     cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
   2647     cv::cuda::magnitudeSqr(loadMat(x, useRoi), loadMat(y, useRoi), dst);
   2648 
   2649     cv::Mat dst_gold;
   2650     cv::magnitude(x, y, dst_gold);
   2651     cv::multiply(dst_gold, dst_gold, dst_gold);
   2652 
   2653     EXPECT_MAT_NEAR(dst_gold, dst, 1e-1);
   2654 }
   2655 
// Instantiate Magnitude over every CUDA device, all test sizes, and both
// whole-matrix and sub-matrix (ROI) layouts.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Magnitude, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));
   2660 
   2661 ////////////////////////////////////////////////////////////////////////////////
   2662 // Phase
   2663 
namespace
{
    // Named bool wrapper for the degrees/radians flag — presumably so the
    // parameter prints readably in generated test names (see other
    // IMPLEMENT_PARAM_CLASS uses in this test suite).
    IMPLEMENT_PARAM_CLASS(AngleInDegrees, bool)
}
   2668 
// Fixture for the cv::cuda::phase tests, parameterized over the target CUDA
// device, the matrix size, the degrees/radians output flag, and whether the
// operation runs on a whole matrix or a sub-matrix (ROI) view.
PARAM_TEST_CASE(Phase, cv::cuda::DeviceInfo, cv::Size, AngleInDegrees, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // device the test executes on
    cv::Size size;                 // dimensions of the generated inputs
    bool angleInDegrees;           // true: angles reported in degrees
    bool useRoi;                   // true: allocate mats as sub-matrix views

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        angleInDegrees = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Select the parameterized device before any GPU work in the test body.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2686 
   2687 CUDA_TEST_P(Phase, Accuracy)
   2688 {
   2689     cv::Mat x = randomMat(size, CV_32FC1);
   2690     cv::Mat y = randomMat(size, CV_32FC1);
   2691 
   2692     cv::cuda::GpuMat dst = createMat(size, CV_32FC1, useRoi);
   2693     cv::cuda::phase(loadMat(x, useRoi), loadMat(y, useRoi), dst, angleInDegrees);
   2694 
   2695     cv::Mat dst_gold;
   2696     cv::phase(x, y, dst_gold, angleInDegrees);
   2697 
   2698     EXPECT_MAT_NEAR(dst_gold, dst, angleInDegrees ? 1e-2 : 1e-3);
   2699 }
   2700 
// Instantiate Phase over every CUDA device, all test sizes, both angle units
// (radians and degrees), and both whole-matrix and sub-matrix (ROI) layouts.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Phase, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
    WHOLE_SUBMAT));
   2706 
   2707 ////////////////////////////////////////////////////////////////////////////////
   2708 // CartToPolar
   2709 
// Fixture for the cv::cuda::cartToPolar tests, parameterized over the target
// CUDA device, the matrix size, the degrees/radians output flag, and whether
// the operation runs on a whole matrix or a sub-matrix (ROI) view.
PARAM_TEST_CASE(CartToPolar, cv::cuda::DeviceInfo, cv::Size, AngleInDegrees, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // device the test executes on
    cv::Size size;                 // dimensions of the generated inputs
    bool angleInDegrees;           // true: angles reported in degrees
    bool useRoi;                   // true: allocate mats as sub-matrix views

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        angleInDegrees = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Select the parameterized device before any GPU work in the test body.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2727 
   2728 CUDA_TEST_P(CartToPolar, Accuracy)
   2729 {
   2730     cv::Mat x = randomMat(size, CV_32FC1);
   2731     cv::Mat y = randomMat(size, CV_32FC1);
   2732 
   2733     cv::cuda::GpuMat mag = createMat(size, CV_32FC1, useRoi);
   2734     cv::cuda::GpuMat angle = createMat(size, CV_32FC1, useRoi);
   2735     cv::cuda::cartToPolar(loadMat(x, useRoi), loadMat(y, useRoi), mag, angle, angleInDegrees);
   2736 
   2737     cv::Mat mag_gold;
   2738     cv::Mat angle_gold;
   2739     cv::cartToPolar(x, y, mag_gold, angle_gold, angleInDegrees);
   2740 
   2741     EXPECT_MAT_NEAR(mag_gold, mag, 1e-4);
   2742     EXPECT_MAT_NEAR(angle_gold, angle, angleInDegrees ? 1e-2 : 1e-3);
   2743 }
   2744 
// Instantiate CartToPolar over every CUDA device, all test sizes, both angle
// units (radians and degrees), and both whole-matrix and sub-matrix layouts.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CartToPolar, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
    WHOLE_SUBMAT));
   2750 
   2751 ////////////////////////////////////////////////////////////////////////////////
   2752 // polarToCart
   2753 
// Fixture for the cv::cuda::polarToCart tests, parameterized over the target
// CUDA device, the matrix size, the degrees/radians input flag, and whether
// the operation runs on a whole matrix or a sub-matrix (ROI) view.
PARAM_TEST_CASE(PolarToCart, cv::cuda::DeviceInfo, cv::Size, AngleInDegrees, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // device the test executes on
    cv::Size size;                 // dimensions of the generated inputs
    bool angleInDegrees;           // true: input angles are in degrees
    bool useRoi;                   // true: allocate mats as sub-matrix views

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        angleInDegrees = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        // Select the parameterized device before any GPU work in the test body.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
   2771 
   2772 CUDA_TEST_P(PolarToCart, Accuracy)
   2773 {
   2774     cv::Mat magnitude = randomMat(size, CV_32FC1);
   2775     cv::Mat angle = randomMat(size, CV_32FC1);
   2776 
   2777     cv::cuda::GpuMat x = createMat(size, CV_32FC1, useRoi);
   2778     cv::cuda::GpuMat y = createMat(size, CV_32FC1, useRoi);
   2779     cv::cuda::polarToCart(loadMat(magnitude, useRoi), loadMat(angle, useRoi), x, y, angleInDegrees);
   2780 
   2781     cv::Mat x_gold;
   2782     cv::Mat y_gold;
   2783     cv::polarToCart(magnitude, angle, x_gold, y_gold, angleInDegrees);
   2784 
   2785     EXPECT_MAT_NEAR(x_gold, x, 1e-4);
   2786     EXPECT_MAT_NEAR(y_gold, y, 1e-4);
   2787 }
   2788 
// Instantiate PolarToCart over every CUDA device, all test sizes, both angle
// units (radians and degrees), and both whole-matrix and sub-matrix layouts.
INSTANTIATE_TEST_CASE_P(CUDA_Arithm, PolarToCart, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(AngleInDegrees(false), AngleInDegrees(true)),
    WHOLE_SUBMAT));
   2794 
   2795 #endif // HAVE_CUDA
   2796