1 /*M/////////////////////////////////////////////////////////////////////////////////////// 2 // 3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 // 5 // By downloading, copying, installing or using the software you agree to this license. 6 // If you do not agree to this license, do not download, install, 7 // copy or use the software. 8 // 9 // 10 // License Agreement 11 // For Open Source Computer Vision Library 12 // 13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved. 15 // Third party copyrights are property of their respective owners. 16 // 17 // Redistribution and use in source and binary forms, with or without modification, 18 // are permitted provided that the following conditions are met: 19 // 20 // * Redistribution's of source code must retain the above copyright notice, 21 // this list of conditions and the following disclaimer. 22 // 23 // * Redistribution's in binary form must reproduce the above copyright notice, 24 // this list of conditions and the following disclaimer in the documentation 25 // and/or other materials provided with the distribution. 26 // 27 // * The name of the copyright holders may not be used to endorse or promote products 28 // derived from this software without specific prior written permission. 29 // 30 // This software is provided by the copyright holders and contributors "as is" and 31 // any express or implied warranties, including, but not limited to, the implied 32 // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

#ifdef HAVE_OPENGL
#  include "gl_core_3_1.hpp"
#  ifdef HAVE_CUDA
#    include <cuda_gl_interop.h>
#  endif
#endif

using namespace cv;
using namespace cv::cuda;

namespace
{
#ifndef HAVE_OPENGL
    // Build without OpenGL: every public entry point below funnels through this
    // stub to report the missing feature.
    inline void throw_no_ogl() { CV_Error(cv::Error::OpenGlNotSupported, "The library is compiled without OpenGL support"); }
#else
    // OpenGL compiled in: reaching this still means there is no usable GL context.
    inline void throw_no_ogl() { CV_Error(cv::Error::OpenGlApiCallError, "OpenGL context doesn't exist"); }

    // Polls glGetError() once and reports a pending error through cvError().
    // Returns false when an error was pending, true when the GL error state
    // was clean. NOTE(review): only one queued GL error is consumed per call.
    bool checkError(const char* file, const int line, const char* func = 0)
    {
        GLenum err = gl::GetError();

        if (err != gl::NO_ERROR_)
        {
            const char* msg;

            switch (err)
            {
            case gl::INVALID_ENUM:
                msg = "An unacceptable value is specified for an enumerated argument";
                break;

            case gl::INVALID_VALUE:
                msg = "A numeric argument is out of range";
                break;

            case gl::INVALID_OPERATION:
                msg = "The specified operation is not allowed in the current state";
                break;

            case gl::OUT_OF_MEMORY:
                msg = "There is not enough memory left to execute the command";
                break;

            default:
                msg = "Unknown error";
            };

            cvError(CV_OpenGlApiCallError, func, msg, file, line);

            return false;
        }

        return true;
    }
#endif

    // Debug-build-only GL error check: CV_DbgAssert compiles away in release
    // builds, so checkError() is only invoked in debug builds. The macro is
    // only expanded inside HAVE_OPENGL sections, where checkError() exists.
    #define CV_CheckGlError() CV_DbgAssert( (checkError(__FILE__, __LINE__, CV_Func)) )
} // namespace

#ifdef HAVE_OPENGL
namespace
{
    // Maps an OpenCV depth code (CV_8U..CV_64F, used as the index) to the
    // matching OpenGL scalar type enum.
    const GLenum gl_types[] = { gl::UNSIGNED_BYTE, gl::BYTE, gl::UNSIGNED_SHORT, gl::SHORT, gl::INT, gl::FLOAT, gl::DOUBLE };
}
#endif

////////////////////////////////////////////////////////////////////////
// setGlDevice

void cv::cuda::setGlDevice(int device)
{
#ifndef HAVE_OPENGL
    (void) device;
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    (void) device;
    throw_no_cuda();
#else
    // Bind the CUDA device used for OpenGL interoperability.
    cudaSafeCall( cudaGLSetGLDevice(device) );
#endif
#endif
}

////////////////////////////////////////////////////////////////////////
// CudaResource

#if defined(HAVE_OPENGL) && defined(HAVE_CUDA)

namespace
{
    // Wrapper around a cudaGraphicsResource_t registered for a GL buffer
    // object: handles register/unregister and pitched device-to-device copies
    // into/out of the mapped device pointer.
    class CudaResource
    {
    public:
        CudaResource();
        ~CudaResource();

        // (Re)registers the GL buffer with CUDA; no-op when the same buffer
        // id is already registered.
        void registerBuffer(GLuint buffer);
        void release();

        // Pitched copies between caller device memory and the mapped buffer.
        // width is the row length in bytes, height the number of rows.
        void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
        void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0);

        // map() leaves the resource mapped; the caller must pair it with unmap().
        void* map(cudaStream_t stream = 0);
        void unmap(cudaStream_t stream = 0);

    private:
        cudaGraphicsResource_t resource_;
        GLuint buffer_;

        class GraphicsMapHolder;
    };

    CudaResource::CudaResource() : resource_(0), buffer_(0)
    {
    }

    CudaResource::~CudaResource()
    {
        release();
    }

    void CudaResource::registerBuffer(GLuint buffer)
    {
        CV_DbgAssert( buffer != 0 );

        if (buffer_ == buffer)
            return;

        cudaGraphicsResource_t resource;
        cudaSafeCall( cudaGraphicsGLRegisterBuffer(&resource, buffer, cudaGraphicsMapFlagsNone) );

        // Register the new buffer first, so the old registration is dropped
        // only after the new one succeeded.
        release();

        resource_ = resource;
        buffer_ = buffer;
    }

    void CudaResource::release()
    {
        // No cudaSafeCall here: release() runs from the destructor, where
        // throwing is not an option.
        if (resource_)
            cudaGraphicsUnregisterResource(resource_);

        resource_ = 0;
        buffer_ = 0;
    }

    // Scope guard: maps the graphics resource on construction, unmaps it on
    // destruction. reset() disarms the guard (used by map(), which must leave
    // the resource mapped).
    class CudaResource::GraphicsMapHolder
    {
    public:
        GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream);
        ~GraphicsMapHolder();

        void reset();

    private:
        cudaGraphicsResource_t* resource_;
        cudaStream_t stream_;
    };

    CudaResource::GraphicsMapHolder::GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream) : resource_(resource), stream_(stream)
    {
        if (resource_)
            cudaSafeCall( cudaGraphicsMapResources(1, resource_, stream_) );
    }

    CudaResource::GraphicsMapHolder::~GraphicsMapHolder()
    {
        // Unchecked on purpose: destructor must not throw.
        if (resource_)
            cudaGraphicsUnmapResources(1, resource_, stream_);
    }

    void CudaResource::GraphicsMapHolder::reset()
    {
        resource_ = 0;
    }

    void CudaResource::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);
        (void) h;

        void* dst;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&dst, &size, resource_) );

        CV_DbgAssert( width * height == size );

        // The mapped buffer is dense (destination pitch == width); only the
        // source carries an explicit pitch.
        if (stream == 0)
            cudaSafeCall( cudaMemcpy2D(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice) );
        else
            cudaSafeCall( cudaMemcpy2DAsync(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice, stream) );
    }

    void CudaResource::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);
        (void) h;

        void* src;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&src, &size, resource_) );

        CV_DbgAssert( width * height == size );

        // Dense source (pitch == width); only the destination has a pitch.
        if (stream == 0)
            cudaSafeCall( cudaMemcpy2D(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice) );
        else
            cudaSafeCall( cudaMemcpy2DAsync(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice, stream) );
    }

    void* CudaResource::map(cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);

        void* ptr;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&ptr, &size, resource_) );

        // Disarm the guard: the resource stays mapped until unmap() is called.
        h.reset();

        return ptr;
    }

    void CudaResource::unmap(cudaStream_t stream)
    {
        CV_Assert( resource_ != 0 );

        cudaGraphicsUnmapResources(1, &resource_, stream);
    }
}

#endif

////////////////////////////////////////////////////////////////////////
// ogl::Buffer

#ifndef HAVE_OPENGL

class cv::ogl::Buffer::Impl
{
};

#else

// Owner of a GL buffer object id plus (when CUDA is available) the lazily
// registered CUDA graphics resource for it.
class cv::ogl::Buffer::Impl
{
public:
    static const Ptr<Impl>& empty();

    Impl(GLuint bufId, bool autoRelease);
    Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease);
    ~Impl();

    void bind(GLenum target) const;

    // Buffer-to-buffer copy via GL copy targets.
    void copyFrom(GLuint srcBuf, GLsizeiptr size);

    // Host-memory upload/download.
    void copyFrom(GLsizeiptr size, const GLvoid* data);
    void copyTo(GLsizeiptr size, GLvoid* data) const;

    void* mapHost(GLenum access);
    void unmapHost();

#ifdef HAVE_CUDA
    void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
    void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0) const;

    void* mapDevice(cudaStream_t stream = 0);
    void unmapDevice(cudaStream_t stream = 0);
#endif

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint bufId() const { return bufId_; }

private:
    Impl();

    GLuint bufId_;
    bool autoRelease_;

#ifdef HAVE_CUDA
    // mutable: registration happens lazily from the const copyTo() path.
    mutable CudaResource cudaResource_;
#endif
};

// Shared "null" impl used by default-constructed and released buffers.
const Ptr<cv::ogl::Buffer::Impl>& cv::ogl::Buffer::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Buffer::Impl::Impl() : bufId_(0), autoRelease_(false)
{
}
// Wraps an externally created GL buffer; validates the id with glIsBuffer.
cv::ogl::Buffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsBuffer(abufId) == gl::TRUE_ );
}

// Allocates a fresh buffer object of `size` bytes (optionally initialized
// from `data`) with DYNAMIC_DRAW usage, then unbinds the target again.
cv::ogl::Buffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{
    gl::GenBuffers(1, &bufId_);
    CV_CheckGlError();

    CV_Assert( bufId_ != 0 );

    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();

    gl::BufferData(target, size, data, gl::DYNAMIC_DRAW);
    CV_CheckGlError();

    gl::BindBuffer(target, 0);
    CV_CheckGlError();
}

cv::ogl::Buffer::Impl::~Impl()
{
    // The buffer is only deleted when ownership was taken (autoRelease_).
    if (autoRelease_ && bufId_)
        gl::DeleteBuffers(1, &bufId_);
}

void cv::ogl::Buffer::Impl::bind(GLenum target) const
{
    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();
}

// GPU-side buffer-to-buffer copy through the COPY_READ/COPY_WRITE targets.
void cv::ogl::Buffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BindBuffer(gl::COPY_READ_BUFFER, srcBuf);
    CV_CheckGlError();

    gl::CopyBufferSubData(gl::COPY_READ_BUFFER, gl::COPY_WRITE_BUFFER, 0, 0, size);
    CV_CheckGlError();
}

// Upload `size` bytes of host data into the buffer.
void cv::ogl::Buffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BufferSubData(gl::COPY_WRITE_BUFFER, 0, size, data);
    CV_CheckGlError();
}

// Download `size` bytes of buffer contents into host memory.
void cv::ogl::Buffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    gl::GetBufferSubData(gl::COPY_READ_BUFFER, 0, size, data);
    CV_CheckGlError();
}

// Maps the buffer into host address space; pair with unmapHost().
void* cv::ogl::Buffer::Impl::mapHost(GLenum access)
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    GLvoid* data = gl::MapBuffer(gl::COPY_READ_BUFFER, access);
    CV_CheckGlError();

    return data;
}

void cv::ogl::Buffer::Impl::unmapHost()
{
    // NOTE(review): assumes the buffer is still bound to COPY_READ_BUFFER
    // from the preceding mapHost() call; return value of UnmapBuffer is ignored.
    gl::UnmapBuffer(gl::COPY_READ_BUFFER);
}

#ifdef HAVE_CUDA

// CUDA-side copies: registration with the interop resource is lazy and cached
// inside CudaResource (no-op when already registered for this buffer id).
void cv::ogl::Buffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyFrom(src, spitch, width, height, stream);
}

void cv::ogl::Buffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyTo(dst, dpitch, width, height, stream);
}

void* cv::ogl::Buffer::Impl::mapDevice(cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    return cudaResource_.map(stream);
}

void cv::ogl::Buffer::Impl::unmapDevice(cudaStream_t stream)
{
    cudaResource_.unmap(stream);
}

#endif // HAVE_CUDA

#endif // HAVE_OPENGL

// Default buffer: shares the static empty Impl, no GL object is created.
cv::ogl::Buffer::Buffer() : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

// Wraps an existing GL buffer id; size/type metadata is taken on trust from
// the caller (only the id itself is validated by Impl).
cv::ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) atype;
    (void) abufId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    type_ = atype;
#endif
}

cv::ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) asize;
    (void) atype;
    (void) abufId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    type_ = atype;
#endif
}

// Constructs a buffer from any InputArray: GL buffers and GpuMats go through
// copyFrom(); host arrays are uploaded directly.
cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
    case _InputArray::CUDA_GPU_MAT:
        copyFrom(arr, target, autoRelease);
        break;

    default:
        {
            // Host upload requires a continuous Mat (single memcpy-style copy).
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            const GLsizeiptr asize = mat.rows * mat.cols * mat.elemSize();
            impl_.reset(new Impl(asize, mat.data, target, autoRelease));
            rows_ = mat.rows;
            cols_ = mat.cols;
            type_ = mat.type();
            break;
        }
    }
#endif
}

// Reallocates only when the requested geometry differs from the current one.
void cv::ogl::Buffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) atype;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || type_ != atype)
    {
        // NOTE(review): arows * acols * CV_ELEM_SIZE(atype) is computed in
        // int before widening to GLsizeiptr — could overflow for very large
        // buffers; confirm against expected size limits.
        const GLsizeiptr asize = arows * acols * CV_ELEM_SIZE(atype);
        impl_.reset(new Impl(asize, 0, target, autoRelease));
        rows_ = arows;
        cols_ = acols;
        type_ = atype;
    }
#endif
}

void cv::ogl::Buffer::release()
{
#ifdef HAVE_OPENGL
    // Force deletion of the GL object when this was the last owner, then
    // fall back to the shared empty Impl.
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    type_ = 0;
#endif
}

void cv::ogl::Buffer::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    (void) flag;
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}

// Copies from any InputArray, reallocating this buffer as needed.
void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();
    create(asize, atype, target, autoRelease);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            GpuMat dmat = arr.getGpuMat();
            impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyFrom(asize.area() * CV_ELEM_SIZE(atype), mat.data);
        }
    }
#endif
}

// Stream-ordered copy from a GpuMat (the only kind this overload accepts).
void cv::ogl::Buffer::copyFrom(InputArray arr, cuda::Stream& stream, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) stream;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    (void) arr;
    (void) stream;
    (void) target;
    (void) autoRelease;
    throw_no_cuda();
#else
    GpuMat dmat = arr.getGpuMat();

    create(dmat.size(), dmat.type(), target, autoRelease);

    impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
#endif
#endif
}

// Copies buffer contents out to a GL buffer, GpuMat, or host array.
void cv::ogl::Buffer::copyTo(OutputArray arr) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            GpuMat& dmat = arr.getGpuMatRef();
            dmat.create(rows_, cols_, type_);
            impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
#endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}

// Stream-ordered download into a GpuMat.
void cv::ogl::Buffer::copyTo(OutputArray arr, cuda::Stream& stream) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) stream;
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    (void) arr;
    (void) stream;
    throw_no_cuda();
#else
    arr.create(rows_, cols_, type_);
    GpuMat dmat = arr.getGpuMat();
    impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
#endif
#endif
}

cv::ogl::Buffer cv::ogl::Buffer::clone(Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
    return cv::ogl::Buffer();
#else
    ogl::Buffer buf;
    buf.copyFrom(*this, target, autoRelease);
    return buf;
#endif
}

void cv::ogl::Buffer::bind(Target target) const
{
#ifndef HAVE_OPENGL
    (void) target;
    throw_no_ogl();
#else
    impl_->bind(target);
#endif
}

// Static helper: unbinds whatever buffer is bound to `target`.
void cv::ogl::Buffer::unbind(Target target)
{
#ifndef HAVE_OPENGL
    (void) target;
    throw_no_ogl();
#else
    gl::BindBuffer(target, 0);
    CV_CheckGlError();
#endif
}

// Returns a Mat header over the host-mapped buffer; the mapping stays live
// until unmapHost() is called.
Mat cv::ogl::Buffer::mapHost(Access access)
{
#ifndef HAVE_OPENGL
    (void) access;
    throw_no_ogl();
    return Mat();
#else
    return Mat(rows_, cols_, type_, impl_->mapHost(access));
#endif
}

void cv::ogl::Buffer::unmapHost()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    return impl_->unmapHost();
#endif
}

// Returns a GpuMat header over the CUDA-mapped buffer; pair with unmapDevice().
GpuMat cv::ogl::Buffer::mapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return GpuMat();
#else
#ifndef HAVE_CUDA
    throw_no_cuda();
    return GpuMat();
#else
    return GpuMat(rows_, cols_, type_, impl_->mapDevice());
#endif
#endif
}

void cv::ogl::Buffer::unmapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    throw_no_cuda();
#else
    impl_->unmapDevice();
#endif
#endif
}

// Stream-ordered variant of mapDevice().
cuda::GpuMat cv::ogl::Buffer::mapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    (void) stream;
    throw_no_ogl();
    return GpuMat();
#else
#ifndef HAVE_CUDA
    (void) stream;
    throw_no_cuda();
    return GpuMat();
#else
    return GpuMat(rows_, cols_, type_, impl_->mapDevice(cuda::StreamAccessor::getStream(stream)));
#endif
#endif
}

void cv::ogl::Buffer::unmapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    (void) stream;
    throw_no_ogl();
#else
#ifndef HAVE_CUDA
    (void) stream;
    throw_no_cuda();
#else
    impl_->unmapDevice(cuda::StreamAccessor::getStream(stream));
#endif
#endif
}

unsigned int cv::ogl::Buffer::bufId() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return 0;
#else
    return impl_->bufId();
#endif
}


//////////////////////////////////////////////////////////////////////////////////////////
// ogl::Texture

#ifndef HAVE_OPENGL

class cv::ogl::Texture2D::Impl
{
};

#else

// Owner of a GL 2D texture object id.
class cv::ogl::Texture2D::Impl
{
public:
    // NOTE(review): returns Ptr by value, unlike Buffer::Impl::empty() which
    // returns a const reference — inconsistent but harmless.
    static const Ptr<Impl> empty();

    Impl(GLuint texId, bool autoRelease);
    Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease);
    ~Impl();

    void copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels);
    void copyTo(GLenum format, GLenum type, GLvoid* pixels) const;

    void bind() const;

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint texId() const { return texId_; }

private:
    Impl();

    GLuint texId_;
    bool autoRelease_;
};

// Shared "null" impl used by default-constructed and released textures.
const Ptr<cv::ogl::Texture2D::Impl> cv::ogl::Texture2D::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Texture2D::Impl::Impl() : texId_(0), autoRelease_(false)
{
}

// Wraps an externally created GL texture; validates the id with glIsTexture.
cv::ogl::Texture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsTexture(atexId) == gl::TRUE_ );
}

// Allocates a fresh 2D texture. When `pixels` is 0 the data comes from the
// currently bound PIXEL_UNPACK_BUFFER (or the texture is left uninitialized).
cv::ogl::Texture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{
    gl::GenTextures(1, &texId_);
    CV_CheckGlError();

    CV_Assert(texId_ != 0);

    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    // Byte-tight rows: OpenCV Mats have no per-row padding guarantees beyond 1.
    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexImage2D(gl::TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

cv::ogl::Texture2D::Impl::~Impl()
{
    if (autoRelease_ && texId_)
        gl::DeleteTextures(1, &texId_);
}

// Replaces the full texture image (same size) and regenerates mipmaps.
void cv::ogl::Texture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexSubImage2D(gl::TEXTURE_2D, 0, 0, 0, width, height, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

// Reads the level-0 image back; when `pixels` is 0 the data goes to the
// currently bound PIXEL_PACK_BUFFER.
void cv::ogl::Texture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::GetTexImage(gl::TEXTURE_2D, 0, format, type, pixels);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::bind() const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();
}

#endif // HAVE_OPENGL

// Default texture: shares the static empty Impl, no GL object is created.
cv::ogl::Texture2D::Texture2D() : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

// Wraps an existing GL texture id; size/format metadata is taken on trust
// from the caller (only the id is validated by Impl).
cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) aformat;
    (void) atexId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    format_ = aformat;
#endif
}

cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) asize;
    (void) aformat;
    (void) atexId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    format_ = aformat;
#endif
}

// Builds a texture from an InputArray. GL buffers and GpuMats are routed
// through a PIXEL_UNPACK_BUFFER so the pixel transfer stays on the GPU;
// host Mats are uploaded directly. Channel count selects the format:
// 1 -> DEPTH_COMPONENT, 3 -> RGB (source read as BGR), 4 -> RGBA (read as BGRA).
cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    // Indexed by channel count; 0 and 2 channels are rejected above.
    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            // Source pixels come from the bound unpack buffer (pixels == 0).
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
#ifndef HAVE_CUDA
            throw_no_cuda();
#else
            // Stage the GpuMat into a temporary PBO, then source from it.
            GpuMat dmat = arr.getGpuMat();
            ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
            buf.setAutoRelease(true);
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
#endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            // Make sure no PBO is bound so `mat.data` is read as a host pointer.
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease));
            break;
        }
    }

    rows_ = asize.height;
    cols_ = asize.width;
    format_ = internalFormats[cn];
#endif
}

// Reallocates only when the requested geometry/format differs. The Format
// enum value is passed as both the internal format and the source format of
// the (empty) upload.
void cv::ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) aformat;
    (void) autoRelease;
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || format_ != aformat)
    {
        ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
        impl_.reset(new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease));
        rows_ = arows;
        cols_ = acols;
        format_ = aformat;
    }
#endif
}

void cv::ogl::Texture2D::release()
{
#ifdef HAVE_OPENGL
    // Force deletion of the GL object when this was the last owner, then
    // fall back to the shared empty Impl.
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    format_ = NONE;
#endif
}

void cv::ogl::Texture2D::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    (void) flag;
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}
1094 1095 void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease) 1096 { 1097 #ifndef HAVE_OPENGL 1098 (void) arr; 1099 (void) autoRelease; 1100 throw_no_ogl(); 1101 #else 1102 const int kind = arr.kind(); 1103 1104 const Size asize = arr.size(); 1105 const int atype = arr.type(); 1106 1107 const int depth = CV_MAT_DEPTH(atype); 1108 const int cn = CV_MAT_CN(atype); 1109 1110 CV_Assert( depth <= CV_32F ); 1111 CV_Assert( cn == 1 || cn == 3 || cn == 4 ); 1112 1113 const Format internalFormats[] = 1114 { 1115 NONE, DEPTH_COMPONENT, NONE, RGB, RGBA 1116 }; 1117 const GLenum srcFormats[] = 1118 { 1119 0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA 1120 }; 1121 1122 create(asize, internalFormats[cn], autoRelease); 1123 1124 switch(kind) 1125 { 1126 case _InputArray::OPENGL_BUFFER: 1127 { 1128 ogl::Buffer buf = arr.getOGlBuffer(); 1129 buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER); 1130 impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0); 1131 ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER); 1132 break; 1133 } 1134 1135 case _InputArray::CUDA_GPU_MAT: 1136 { 1137 #ifndef HAVE_CUDA 1138 throw_no_cuda(); 1139 #else 1140 GpuMat dmat = arr.getGpuMat(); 1141 ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER); 1142 buf.setAutoRelease(true); 1143 buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER); 1144 impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0); 1145 ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER); 1146 #endif 1147 1148 break; 1149 } 1150 1151 default: 1152 { 1153 Mat mat = arr.getMat(); 1154 CV_Assert( mat.isContinuous() ); 1155 ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER); 1156 impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data); 1157 } 1158 } 1159 #endif 1160 } 1161 1162 void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const 1163 { 1164 #ifndef HAVE_OPENGL 1165 (void) arr; 1166 (void) ddepth; 1167 (void) autoRelease; 1168 
throw_no_ogl(); 1169 #else 1170 const int kind = arr.kind(); 1171 1172 const int cn = format_ == DEPTH_COMPONENT ? 1: format_ == RGB ? 3 : 4; 1173 const GLenum dstFormat = format_ == DEPTH_COMPONENT ? gl::DEPTH_COMPONENT : format_ == RGB ? gl::BGR : gl::BGRA; 1174 1175 switch(kind) 1176 { 1177 case _InputArray::OPENGL_BUFFER: 1178 { 1179 ogl::Buffer& buf = arr.getOGlBufferRef(); 1180 buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease); 1181 buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER); 1182 impl_->copyTo(dstFormat, gl_types[ddepth], 0); 1183 ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER); 1184 break; 1185 } 1186 1187 case _InputArray::CUDA_GPU_MAT: 1188 { 1189 #ifndef HAVE_CUDA 1190 throw_no_cuda(); 1191 #else 1192 ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER); 1193 buf.setAutoRelease(true); 1194 buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER); 1195 impl_->copyTo(dstFormat, gl_types[ddepth], 0); 1196 ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER); 1197 buf.copyTo(arr); 1198 #endif 1199 1200 break; 1201 } 1202 1203 default: 1204 { 1205 arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn)); 1206 Mat mat = arr.getMat(); 1207 CV_Assert( mat.isContinuous() ); 1208 ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER); 1209 impl_->copyTo(dstFormat, gl_types[ddepth], mat.data); 1210 } 1211 } 1212 #endif 1213 } 1214 1215 void cv::ogl::Texture2D::bind() const 1216 { 1217 #ifndef HAVE_OPENGL 1218 throw_no_ogl(); 1219 #else 1220 impl_->bind(); 1221 #endif 1222 } 1223 1224 unsigned int cv::ogl::Texture2D::texId() const 1225 { 1226 #ifndef HAVE_OPENGL 1227 throw_no_ogl(); 1228 return 0; 1229 #else 1230 return impl_->texId(); 1231 #endif 1232 } 1233 1234 1235 //////////////////////////////////////////////////////////////////////// 1236 // ogl::Arrays 1237 1238 void cv::ogl::Arrays::setVertexArray(InputArray vertex) 1239 { 1240 const int cn = vertex.channels(); 1241 const int depth = vertex.depth(); 
1242 1243 CV_Assert( cn == 2 || cn == 3 || cn == 4 ); 1244 CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F ); 1245 1246 if (vertex.kind() == _InputArray::OPENGL_BUFFER) 1247 vertex_ = vertex.getOGlBuffer(); 1248 else 1249 vertex_.copyFrom(vertex); 1250 1251 size_ = vertex_.size().area(); 1252 } 1253 1254 void cv::ogl::Arrays::resetVertexArray() 1255 { 1256 vertex_.release(); 1257 size_ = 0; 1258 } 1259 1260 void cv::ogl::Arrays::setColorArray(InputArray color) 1261 { 1262 const int cn = color.channels(); 1263 1264 CV_Assert( cn == 3 || cn == 4 ); 1265 1266 if (color.kind() == _InputArray::OPENGL_BUFFER) 1267 color_ = color.getOGlBuffer(); 1268 else 1269 color_.copyFrom(color); 1270 } 1271 1272 void cv::ogl::Arrays::resetColorArray() 1273 { 1274 color_.release(); 1275 } 1276 1277 void cv::ogl::Arrays::setNormalArray(InputArray normal) 1278 { 1279 const int cn = normal.channels(); 1280 const int depth = normal.depth(); 1281 1282 CV_Assert( cn == 3 ); 1283 CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F ); 1284 1285 if (normal.kind() == _InputArray::OPENGL_BUFFER) 1286 normal_ = normal.getOGlBuffer(); 1287 else 1288 normal_.copyFrom(normal); 1289 } 1290 1291 void cv::ogl::Arrays::resetNormalArray() 1292 { 1293 normal_.release(); 1294 } 1295 1296 void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord) 1297 { 1298 const int cn = texCoord.channels(); 1299 const int depth = texCoord.depth(); 1300 1301 CV_Assert( cn >= 1 && cn <= 4 ); 1302 CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F ); 1303 1304 if (texCoord.kind() == _InputArray::OPENGL_BUFFER) 1305 texCoord_ = texCoord.getOGlBuffer(); 1306 else 1307 texCoord_.copyFrom(texCoord); 1308 } 1309 1310 void cv::ogl::Arrays::resetTexCoordArray() 1311 { 1312 texCoord_.release(); 1313 } 1314 1315 void cv::ogl::Arrays::release() 1316 { 1317 resetVertexArray(); 1318 resetColorArray(); 1319 
    resetNormalArray();
    resetTexCoordArray();
}

// Propagates the auto-release flag to every attribute buffer.
void cv::ogl::Arrays::setAutoRelease(bool flag)
{
    vertex_.setAutoRelease(flag);
    color_.setAutoRelease(flag);
    normal_.setAutoRelease(flag);
    texCoord_.setAutoRelease(flag);
}

// Binds the stored arrays as fixed-function client-state arrays.
// Every non-empty array must have the same number of elements as the
// vertex array; empty arrays get their client state disabled instead.
void cv::ogl::Arrays::bind() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    CV_Assert( texCoord_.empty() || texCoord_.size().area() == size_ );
    CV_Assert( normal_.empty() || normal_.size().area() == size_ );
    CV_Assert( color_.empty() || color_.size().area() == size_ );

    if (texCoord_.empty())
    {
        gl::DisableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        texCoord_.bind(ogl::Buffer::ARRAY_BUFFER);

        // Last argument 0 == offset into the currently bound buffer object.
        gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (normal_.empty())
    {
        gl::DisableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();

        normal_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (color_.empty())
    {
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        color_.bind(ogl::Buffer::ARRAY_BUFFER);

        const int cn = color_.channels();

        gl::ColorPointer(cn, gl_types[color_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (vertex_.empty())
    {
        gl::DisableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        vertex_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
        CV_CheckGlError();
    }

    // Leave no array buffer bound once all pointers are set up.
    ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
#endif
}

////////////////////////////////////////////////////////////////////////
// Rendering

// Draws `tex` as a screen-aligned textured quad.
// wndRect and texRect are normalized [0,1] window / texture rectangles.
void cv::ogl::render(const ogl::Texture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{
#ifndef HAVE_OPENGL
    (void) tex;
    (void) wndRect;
    (void) texRect;
    throw_no_ogl();
#else
    if (!tex.empty())
    {
        // Unit orthographic projection; glOrtho(left,right,bottom,top,...)
        // with bottom=1, top=0 makes y grow downward (window convention).
        gl::MatrixMode(gl::PROJECTION);
        gl::LoadIdentity();
        gl::Ortho(0.0, 1.0, 1.0, 0.0, -1.0, 1.0);
        CV_CheckGlError();

        gl::MatrixMode(gl::MODELVIEW);
        gl::LoadIdentity();
        CV_CheckGlError();

        gl::Disable(gl::LIGHTING);
        CV_CheckGlError();

        tex.bind();

        gl::Enable(gl::TEXTURE_2D);
        CV_CheckGlError();

        // REPLACE: draw the texel colors untouched by lighting/current color.
        gl::TexEnvi(gl::TEXTURE_ENV, gl::TEXTURE_ENV_MODE, gl::REPLACE);
        CV_CheckGlError();

        gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR);
        CV_CheckGlError();

        // NOTE(review): Rect_<double> members are implicitly narrowed to float
        // in these initializers — confirm this compiles cleanly under the
        // project's warning flags.
        const float vertex[] =
        {
            wndRect.x, wndRect.y, 0.0f,
            wndRect.x, (wndRect.y + wndRect.height), 0.0f,
            wndRect.x + wndRect.width, (wndRect.y + wndRect.height), 0.0f,
            wndRect.x + wndRect.width, wndRect.y, 0.0f
        };
        const float texCoords[] =
        {
            texRect.x, texRect.y,
            texRect.x, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y
        };

        // Client-memory arrays: no buffer object may be bound, otherwise the
        // pointers below would be treated as buffer offsets.
        ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);

        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        gl::TexCoordPointer(2, gl::FLOAT, 0, texCoords);
        CV_CheckGlError();

        gl::DisableClientState(gl::NORMAL_ARRAY);
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        gl::VertexPointer(3, gl::FLOAT, 0, vertex);
        CV_CheckGlError();

        gl::DrawArrays(gl::QUADS, 0, 4);
        CV_CheckGlError();
    }
#endif
}

// Draws `arr` with the given primitive `mode`; `color` is a 0-255 scalar
// rescaled to the GL [0,1] range and applied as the constant current color.
void cv::ogl::render(const ogl::Arrays& arr, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        gl::DrawArrays(mode, 0, arr.size());
    }
#endif
}

// Indexed draw: `indices` is either an ogl::Buffer (bound as the element
// array buffer) or any host array readable as a continuous 1-channel Mat.
void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) indices;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                // Map the element depth onto the matching GL index type.
                // NOTE(review): signed depths (CV_8S/CV_16S/CV_32S) fall through
                // to the unsigned GL types — indices are assumed non-negative.
                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                // Last argument 0 == offset into the bound element array buffer.
                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );

                // Same depth-to-GL-type mapping as the buffer path above.
                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                // Client-memory indices: no element array buffer may be bound,
                // otherwise mat.data would be treated as a buffer offset.
                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}