1 /*M/////////////////////////////////////////////////////////////////////////////////////// 2 // 3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 // 5 // By downloading, copying, installing or using the software you agree to this license. 6 // If you do not agree to this license, do not download, install, 7 // copy or use the software. 8 // 9 // 10 // License Agreement 11 // For Open Source Computer Vision Library 12 // 13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved. 15 // Copyright (C) 2013, OpenCV Foundation, all rights reserved. 16 // Third party copyrights are property of their respective owners. 17 // 18 // Redistribution and use in source and binary forms, with or without modification, 19 // are permitted provided that the following conditions are met: 20 // 21 // * Redistribution's of source code must retain the above copyright notice, 22 // this list of conditions and the following disclaimer. 23 // 24 // * Redistribution's in binary form must reproduce the above copyright notice, 25 // this list of conditions and the following disclaimer in the documentation 26 // and/or other materials provided with the distribution. 27 // 28 // * The name of the copyright holders may not be used to endorse or promote products 29 // derived from this software without specific prior written permission. 30 // 31 // This software is provided by the copyright holders and contributors "as is" and 32 // any express or implied warranties, including, but not limited to, the implied 33 // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_CORE_CUDAINL_HPP__
#define __OPENCV_CORE_CUDAINL_HPP__

#include "opencv2/core/cuda.hpp"

//! @cond IGNORED

namespace cv { namespace cuda {

//===================================================================================
// GpuMat
//===================================================================================

// Default constructor: empty header, no device memory allocated.
// Only the allocator for later create() calls is remembered.
inline
GpuMat::GpuMat(Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{}

// Allocates a rows_ x cols_ matrix of the given type.
// Non-positive dimensions leave the matrix empty (no allocation).
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

// Size-based variant; note Size is (width, height) while create() takes (rows, cols).
inline
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

// Allocates and fills every element with the scalar s_.
inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
    {
        create(rows_, cols_, type_);
        setTo(s_);
    }
}

// Size-based variant of the fill constructor above.
inline
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
    {
        create(size_.height, size_.width, type_);
        setTo(s_);
    }
}

// Copy constructor: shallow copy that shares the device buffer and
// atomically bumps the reference count (no data is copied).
inline
GpuMat::GpuMat(const GpuMat& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
{
    if (refcount)
        CV_XADD(refcount, 1);
}

// Constructs from any host array by uploading it to device memory.
inline
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
    flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    upload(arr);
}

// Destructor: decrements the ref count and frees device memory when it reaches zero.
inline
GpuMat::~GpuMat()
{
    release();
}

// Copy assignment via the copy-and-swap idiom: the temporary's destructor
// releases this object's previous buffer; self-assignment is a no-op.
inline
GpuMat& GpuMat::operator =(const GpuMat& m)
{
    if (this != &m)
    {
        GpuMat temp(m);
        swap(temp);
    }

    return *this;
}

// Convenience overload: Size is (width, height), create() takes (rows, cols).
inline
void GpuMat::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

// Swaps all header fields with b; O(1), no device memory is touched.
inline
void GpuMat::swap(GpuMat& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(allocator, b.allocator);
}

// Deep copy: allocates a new buffer and copies the data into it.
inline
GpuMat GpuMat::clone() const
{
    GpuMat m;
    copyTo(m);
    return m;
}

// Masked copy on the default (null) stream, i.e. synchronous behavior.
inline
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
{
    copyTo(dst, mask, Stream::Null());
}

// Fills the matrix with s on the default stream.
inline
GpuMat& GpuMat::setTo(Scalar s)
{
    return setTo(s, Stream::Null());
}

// Masked fill on the default stream.
inline
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
{
    return setTo(s, mask, Stream::Null());
}

// Type conversion on the default stream (no scaling).
inline
void GpuMat::convertTo(OutputArray dst, int rtype) const
{
    convertTo(dst, rtype, Stream::Null());
}

// Scaled conversion (dst = this*alpha + beta) on the default stream.
inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
{
    convertTo(dst, rtype, alpha, beta, Stream::Null());
}

// Scaled conversion with an explicit stream; beta defaults to 0.
inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
{
    convertTo(dst, rtype, alpha, 0.0, stream);
}

// Assigns *this to m, converting to _type first unless _type is negative
// (negative means "keep the current type", so a shallow assignment suffices).
inline
void GpuMat::assignTo(GpuMat& m, int _type) const
{
    if (_type < 0)
        m = *this;
    else
        convertTo(m, _type);
}

// Returns a device pointer to the beginning of row y.
// The unsigned comparison also rejects negative y in a single check.
inline
uchar* GpuMat::ptr(int y)
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

inline
const uchar* GpuMat::ptr(int y) const
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

// Typed row-pointer accessors; caller is responsible for _Tp matching type().
template<typename _Tp> inline
_Tp* GpuMat::ptr(int y)
{
    return (_Tp*)ptr(y);
}

template<typename _Tp> inline
const _Tp* GpuMat::ptr(int y) const
{
    return (const _Tp*)ptr(y);
}

// Implicit conversions to the lightweight device-side wrappers used by CUDA kernels.
template <class T> inline
GpuMat::operator PtrStepSz<T>() const
{
    return PtrStepSz<T>(rows, cols, (T*)data, step);
}

template <class T> inline
GpuMat::operator PtrStep<T>() const
{
    return PtrStep<T>((T*)data, step);
}

// Single-row header sharing this matrix's data (range is [y, y+1)).
inline
GpuMat GpuMat::row(int y) const
{
    return GpuMat(*this, Range(y, y+1), Range::all());
}

// Single-column header sharing this matrix's data.
inline
GpuMat GpuMat::col(int x) const
{
    return GpuMat(*this, Range::all(), Range(x, x+1));
}

// Row span [startrow, endrow); shares data, no copy.
inline
GpuMat GpuMat::rowRange(int startrow, int endrow) const
{
    return GpuMat(*this, Range(startrow, endrow), Range::all());
}

inline
GpuMat GpuMat::rowRange(Range r) const
{
    return GpuMat(*this, r, Range::all());
}

// Column span [startcol, endcol); shares data, no copy.
inline
GpuMat GpuMat::colRange(int startcol, int endcol) const
{
    return GpuMat(*this, Range::all(), Range(startcol, endcol));
}

inline
GpuMat GpuMat::colRange(Range r) const
{
    return GpuMat(*this, Range::all(), r);
}

// Sub-matrix selected by row and column ranges (shared data).
inline
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
{
    return GpuMat(*this, rowRange_, colRange_);
}

// Sub-matrix selected by a rectangular ROI (shared data).
inline
GpuMat GpuMat::operator ()(Rect roi) const
{
    return GpuMat(*this, roi);
}

// True when rows are stored back-to-back with no padding between them.
inline
bool GpuMat::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

// Size of one element in bytes (all channels).
inline
size_t GpuMat::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

// Size of one channel of one element in bytes.
inline
size_t GpuMat::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

// Element type as a CV_ type id (e.g. CV_8UC3), decoded from flags.
inline
int GpuMat::type() const
{
    return CV_MAT_TYPE(flags);
}

// Element depth (e.g. CV_8U), decoded from flags.
inline
int GpuMat::depth() const
{
    return CV_MAT_DEPTH(flags);
}

// Number of channels, decoded from flags.
inline
int GpuMat::channels() const
{
    return CV_MAT_CN(flags);
}

// Row stride measured in single-channel elements rather than bytes.
inline
size_t GpuMat::step1() const
{
    return step / elemSize1();
}

// Matrix size as (width, height) == (cols, rows).
inline
Size GpuMat::size() const
{
    return Size(cols, rows);
}

// True when no device memory is attached.
inline
bool GpuMat::empty() const
{
    return data == 0;
}

// Convenience wrapper returning a continuous (padding-free) GpuMat by value.
static inline
GpuMat createContinuous(int rows, int cols, int type)
{
    GpuMat m;
    createContinuous(rows, cols, type, m);
    return m;
}

static inline
void createContinuous(Size size, int type, OutputArray arr)
{
    createContinuous(size.height, size.width, type, arr);
}

static inline
GpuMat createContinuous(Size size, int type)
{
    GpuMat m;
    createContinuous(size, type, m);
    return m;
}

// Size-based forwarding overload of ensureSizeIsEnough.
static inline
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
{
    ensureSizeIsEnough(size.height, size.width, type, arr);
}

// ADL-friendly free swap delegating to the member swap.
static inline
void swap(GpuMat& a, GpuMat& b)
{
    a.swap(b);
}

//===================================================================================
// HostMem
//===================================================================================

// Default constructor: empty header, no host memory allocated;
// only the allocation type for later create() calls is stored.
inline
HostMem::HostMem(AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}

// Copy constructor: shallow copy sharing the host buffer; the reference
// count is bumped atomically, no data is copied.
inline
HostMem::HostMem(const HostMem& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
    if( refcount )
        CV_XADD(refcount, 1);
}

// Allocates a rows_ x cols_ buffer of the given type.
// Non-positive dimensions leave the object empty.
inline
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

// Size-based variant; Size is (width, height), create() takes (rows, cols).
inline
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

// Constructs by copying the contents of an arbitrary host array into
// newly allocated memory of the requested allocation type.
inline
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    arr.getMat().copyTo(*this);
}

// Destructor: decrements the ref count and frees the buffer when it reaches zero.
inline
HostMem::~HostMem()
{
    release();
}

// Copy assignment via copy-and-swap; the temporary's destructor releases
// this object's previous buffer. Self-assignment is a no-op.
inline
HostMem& HostMem::operator =(const HostMem& m)
{
    if (this != &m)
    {
        HostMem temp(m);
        swap(temp);
    }

    return *this;
}

// Swaps all header fields with b; O(1), no buffer contents are touched.
inline
void HostMem::swap(HostMem& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(alloc_type, b.alloc_type);
}

// Deep copy: new buffer with the same allocation type, contents copied
// through a temporary Mat header over this object's data.
inline
HostMem HostMem::clone() const
{
    HostMem m(size(), type(), alloc_type);
    createMatHeader().copyTo(m);
    return m;
}

// Convenience overload: Size is (width, height), create() takes (rows, cols).
inline
void HostMem::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

// Returns a Mat header over this buffer — no copy, no ownership transfer;
// the header is only valid while this HostMem keeps the buffer alive.
inline
Mat HostMem::createMatHeader() const
{
    return Mat(size(), type(), data, step);
}

// True when rows are stored back-to-back with no padding between them.
inline
bool HostMem::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

// Size of one element in bytes (all channels).
inline
size_t HostMem::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

// Size of one channel of one element in bytes.
inline
size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

// Element type as a CV_ type id, decoded from flags.
inline
int HostMem::type() const
{
    return CV_MAT_TYPE(flags);
}

// Element depth, decoded from flags.
inline
int HostMem::depth() const
{
    return CV_MAT_DEPTH(flags);
}

// Number of channels, decoded from flags.
inline
int HostMem::channels() const
{
    return CV_MAT_CN(flags);
}

// Row stride measured in single-channel elements rather than bytes.
inline
size_t HostMem::step1() const
{
    return step / elemSize1();
}

// Buffer size as (width, height) == (cols, rows).
inline
Size HostMem::size() const
{
    return Size(cols, rows);
}

// True when no buffer is attached.
inline
bool HostMem::empty() const
{
    return data == 0;
}

// ADL-friendly free swap delegating to the member swap.
static inline
void swap(HostMem& a, HostMem& b)
{
    a.swap(b);
}

//===================================================================================
// Stream
//===================================================================================

// Wraps an existing implementation handle; used internally to construct
// Stream objects around pre-built Impl instances.
inline
Stream::Stream(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Initialization & Info
//===================================================================================

// True if the OpenCV CUDA module was built with PTX or binary code
// for exactly the given compute capability.
inline
bool TargetArchs::has(int major, int minor)
{
    return hasPtx(major, minor) || hasBin(major, minor);
}

// True if the module was built with PTX or binary code for the given
// compute capability or any greater one.
inline
bool TargetArchs::hasEqualOrGreater(int major, int minor)
{
    return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
}

// Queries information about the currently active CUDA device.
inline
DeviceInfo::DeviceInfo()
{
    device_id_ = getDevice();
}

// Queries information about a specific device; the id must be a valid
// index into the set of CUDA-enabled devices.
inline
DeviceInfo::DeviceInfo(int device_id)
{
    CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );
    device_id_ = device_id;
}

inline
int DeviceInfo::deviceID() const
{
    return device_id_;
}

// Free device memory in bytes; queried live, so the value can change
// between calls.
inline
size_t DeviceInfo::freeMemory() const
{
    size_t _totalMemory, _freeMemory;
    queryMemory(_totalMemory, _freeMemory);
    return _freeMemory;
}

// Total device memory in bytes.
inline
size_t DeviceInfo::totalMemory() const
{
    size_t _totalMemory, _freeMemory;
    queryMemory(_totalMemory, _freeMemory);
    return _totalMemory;
}

// Compute capability is packed as major*10 + minor (e.g. 3.5 -> 35) and
// compared against the FeatureSet value, which is assumed to use the same
// encoding.
inline
bool DeviceInfo::supports(FeatureSet feature_set) const
{
    int version = majorVersion() * 10 + minorVersion();
    return version >= feature_set;
}


}} // namespace cv { namespace cuda {

//===================================================================================
// Mat
//===================================================================================

namespace cv {

// Constructs a host Mat by downloading the contents of a GpuMat from
// device memory (blocking copy).
inline
Mat::Mat(const cuda::GpuMat& m)
    : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
{
    m.download(*this);
}

}

//! @endcond

#endif // __OPENCV_CORE_CUDAINL_HPP__