/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"

#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
#include <fpu_control.h>    // fpu_control_t, _FPU_GETCW, _FPU_SETCW
#elif defined(_WIN32) && !defined(_WIN64)
#include <float.h>          // _controlfp_s, _PC_24, _MCW_PC
#endif

namespace
{
// RAII guard that switches the x87 FPU to single-precision (24-bit mantissa)
// rounding for its lifetime and restores the previous control word on
// destruction. This makes host-side float arithmetic match the GPU's native
// single precision, which the bit-to-bit comparison below relies on.
// Background: http://www.christian-seiler.de/projekte/fpmath/
class FpuControl
{
public:
    FpuControl();
    ~FpuControl();

private:
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
    fpu_control_t fpu_oldcw, fpu_cw;
#elif defined(_WIN32) && !defined(_WIN64)
    unsigned int fpu_oldcw, fpu_cw;
#endif
};

FpuControl::FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
    _FPU_GETCW(fpu_oldcw);
    fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
    _FPU_SETCW(fpu_cw);
#elif defined(_WIN32) && !defined(_WIN64)
    _controlfp_s(&fpu_cw, 0, 0);
    fpu_oldcw = fpu_cw;
    _controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
#endif
}

FpuControl::~FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__)
    _FPU_SETCW(fpu_oldcw);
#elif defined(_WIN32) && !defined(_WIN64)
    _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
#endif
}
}

TestHaarCascadeApplication::TestHaarCascadeApplication(std::string testName_, NCVTestSourceProvider<Ncv8u> &src_,
                                                       std::string cascadeName_, Ncv32u width_, Ncv32u height_)
    :
    NCVTestProvider(testName_),
    src(src_),
    cascadeName(cascadeName_),
    width(width_),
    height(height_)
{
}


bool TestHaarCascadeApplication::toString(std::ofstream &strOut)
{
    strOut << "cascadeName=" << cascadeName << std::endl;
    strOut << "width=" << width << std::endl;
    strOut << "height=" << height << std::endl;
    return true;
}


bool TestHaarCascadeApplication::init()
{
    return true;
}

bool TestHaarCascadeApplication::process()
{
    NCVStatus ncvStat;
    bool rcode = false;

    Ncv32u numStages, numNodes, numFeatures;

    ncvStat = ncvHaarGetClassifierSize(this->cascadeName, numStages, numNodes, numFeatures);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    // Host- and device-side storage for the cascade stages, nodes and features
    NCVVectorAlloc<HaarStage64> h_HaarStages(*this->allocatorCPU.get(), numStages);
    ncvAssertReturn(h_HaarStages.isMemAllocated(), false);
    NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(*this->allocatorCPU.get(), numNodes);
    ncvAssertReturn(h_HaarNodes.isMemAllocated(), false);
    NCVVectorAlloc<HaarFeature64> h_HaarFeatures(*this->allocatorCPU.get(), numFeatures);
    ncvAssertReturn(h_HaarFeatures.isMemAllocated(), false);

    NCVVectorAlloc<HaarStage64> d_HaarStages(*this->allocatorGPU.get(), numStages);
    ncvAssertReturn(d_HaarStages.isMemAllocated(), false);
    NCVVectorAlloc<HaarClassifierNode128> d_HaarNodes(*this->allocatorGPU.get(), numNodes);
    ncvAssertReturn(d_HaarNodes.isMemAllocated(), false);
    NCVVectorAlloc<HaarFeature64> d_HaarFeatures(*this->allocatorGPU.get(), numFeatures);
    ncvAssertReturn(d_HaarFeatures.isMemAllocated(), false);

    HaarClassifierCascadeDescriptor haar;
    haar.ClassifierSize.width = haar.ClassifierSize.height = 1;
    haar.bNeedsTiltedII = false;
    haar.NumClassifierRootNodes = numNodes;
    haar.NumClassifierTotalNodes = numNodes;
    haar.NumFeatures = numFeatures;
    haar.NumStages = numStages;

    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
    NCV_SKIP_COND_BEGIN

    ncvStat = ncvHaarLoadFromFile_host(this->cascadeName, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
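    // Mirror the cascade loaded on the host into the device-side buffers.
    // copySolid() takes the CUDA stream as its second argument (0 = default
    // stream), so an explicit synchronize follows before the data is used.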
    ncvAssertReturn(NCV_SUCCESS == h_HaarStages.copySolid(d_HaarStages, 0), false);
    ncvAssertReturn(NCV_SUCCESS == h_HaarNodes.copySolid(d_HaarNodes, 0), false);
    ncvAssertReturn(NCV_SUCCESS == h_HaarFeatures.copySolid(d_HaarFeatures, 0), false);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);

    NCV_SKIP_COND_END

    // The integral image is one pixel larger than the source in each dimension;
    // the search ROI is what remains after subtracting the classifier window.
    NcvSize32s srcRoi, srcIIRoi, searchRoi;
    srcRoi.width = this->width;
    srcRoi.height = this->height;
    srcIIRoi.width = srcRoi.width + 1;
    srcIIRoi.height = srcRoi.height + 1;
    searchRoi.width = srcIIRoi.width - haar.ClassifierSize.width;
    searchRoi.height = srcIIRoi.height - haar.ClassifierSize.height;
    if (searchRoi.width <= 0 || searchRoi.height <= 0)
    {
        return false;
    }
    NcvSize32u searchRoiU(searchRoi.width, searchRoi.height);

    NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_img.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_img.isMemAllocated(), false);

    Ncv32u integralWidth = this->width + 1;
    Ncv32u integralHeight = this->height + 1;

    NCVMatrixAlloc<Ncv32u> d_integralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(d_integralImage.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> h_integralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(h_integralImage.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv64u> h_sqIntegralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(h_sqIntegralImage.isMemAllocated(), false);

    NCVMatrixAlloc<Ncv32f> d_rectStdDev(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_rectStdDev.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> d_pixelMask(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_pixelMask.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32f> h_rectStdDev(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_rectStdDev.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> h_pixelMask(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_pixelMask.isMemAllocated(), false);

    NCVVectorAlloc<NcvRect32u> d_hypotheses(*this->allocatorGPU.get(), this->width * this->height);
    ncvAssertReturn(d_hypotheses.isMemAllocated(), false);
    NCVVectorAlloc<NcvRect32u> h_hypotheses(*this->allocatorCPU.get(), this->width * this->height);
    ncvAssertReturn(h_hypotheses.isMemAllocated(), false);

    // A single scratch buffer, sized for the larger of the two requirements,
    // is shared by the integral and squared-integral computations.
    NCVStatus nppStat;
    Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
    nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &szTmpBufIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
    nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &szTmpBufSqIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
    NCVVectorAlloc<Ncv8u> d_tmpIIbuf(*this->allocatorGPU.get(), std::max(szTmpBufIntegral, szTmpBufSqIntegral));
    ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), false);

    Ncv32u detectionsOnThisScale_d = 0;
    Ncv32u detectionsOnThisScale_h = 0;

    NCV_SKIP_COND_BEGIN

    ncvAssertReturn(this->src.fill(h_img), false);
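    // Upload the test image and build both integral images on the device; the
    // squared integral feeds the per-window standard deviation used to
    // normalize the Haar feature responses.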
    ncvStat = h_img.copySolid(d_img, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);

    nppStat = nppiStIntegral_8u32u_C1R(d_img.ptr(), d_img.pitch(),
                                       d_integralImage.ptr(), d_integralImage.pitch(),
                                       NcvSize32u(d_img.width(), d_img.height()),
                                       d_tmpIIbuf.ptr(), szTmpBufIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);

    nppStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),
                                          d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
                                          NcvSize32u(d_img.width(), d_img.height()),
                                          d_tmpIIbuf.ptr(), szTmpBufSqIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);

    const NcvRect32u rect(
        HAAR_STDDEV_BORDER,
        HAAR_STDDEV_BORDER,
        haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
        haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
    nppStat = nppiStRectStdDev_32f_C1R(
        d_integralImage.ptr(), d_integralImage.pitch(),
        d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
        d_rectStdDev.ptr(), d_rectStdDev.pitch(),
        NcvSize32u(searchRoi.width, searchRoi.height), rect,
        1.0f, true);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);

    // Download the integral image and stddev map for the host-side reference run
    ncvStat = d_integralImage.copySolid(h_integralImage, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
    ncvStat = d_rectStdDev.copySolid(h_rectStdDev, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    // Seed the pixel mask: each position inside the search ROI is encoded as
    // (y << 16) | x, everything outside is marked invalid.
    for (Ncv32u i=0; i<searchRoiU.height; i++)
    {
        for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
        {
            if (j<searchRoiU.width)
            {
                h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = (i << 16) | j;
            }
            else
            {
                h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
            }
        }
    }
    ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false);

    {
        // Run the host-side cascade with the FPU forced to single precision,
        // so the result is directly comparable to the device output
        FpuControl fpu;
        (void) fpu;

        ncvStat = ncvApplyHaarClassifierCascade_host(
            h_integralImage, h_rectStdDev, h_pixelMask,
            detectionsOnThisScale_h,
            haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
            searchRoiU, 1, 1.0f);
        ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
    }

    NCV_SKIP_COND_END

    int devId;
    ncvAssertCUDAReturn(cudaGetDevice(&devId), false);
    cudaDeviceProp _devProp;
    ncvAssertCUDAReturn(cudaGetDeviceProperties(&_devProp, devId), false);

    ncvStat = ncvApplyHaarClassifierCascade_device(
        d_integralImage, d_rectStdDev, d_pixelMask,
        detectionsOnThisScale_d,
        haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
        searchRoiU, 1, 1.0f,
        *this->allocatorGPU.get(), *this->allocatorCPU.get(),
        _devProp, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    NCVMatrixAlloc<Ncv32u> h_pixelMask_d(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_pixelMask_d.isMemAllocated(), false);

    // bit-to-bit check
    bool bLoopVirgin = true;

    NCV_SKIP_COND_BEGIN

    ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    if (detectionsOnThisScale_d != detectionsOnThisScale_h)
    {
        bLoopVirgin = false;
    }
    else
    {
        // Sort the device detections before the element-wise comparison,
        // since the GPU compaction does not guarantee host ordering
        std::sort(h_pixelMask_d.ptr(), h_pixelMask_d.ptr() + detectionsOnThisScale_d);
        for (Ncv32u i=0; i<detectionsOnThisScale_d && bLoopVirgin; i++)
        {
            if (h_pixelMask.ptr()[i] != h_pixelMask_d.ptr()[i])
            {
                bLoopVirgin = false;
            }
        }
    }

    NCV_SKIP_COND_END

    if (bLoopVirgin)
    {
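        // host and device produced identical detection sets for this scale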
        rcode = true;
    }

    return rcode;
}


bool TestHaarCascadeApplication::deinit()
{
    return true;
}