/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/* ////////////////////////////////////////////////////////////////////
//
//  CvMat logical operations: &, |, ^ ...
//
// */

#include "_cxcore.h"

/////////////////////////////////////////////////////////////////////////////////////////
//                                                                                       //
//                             Macros for logic operations                               //
//                                                                                       //
/////////////////////////////////////////////////////////////////////////////////////////

/* //////////////////////////////////////////////////////////////////////////////////////
                                      Mat op Mat
////////////////////////////////////////////////////////////////////////////////////// */


#define ICV_DEF_BIN_LOG_OP_2D( __op__, name ) \
IPCVAPI_IMPL( CvStatus, icv##name##_8u_C1R, \
( const uchar* src1, int step1, const uchar* src2, int step2, \
  uchar* dst, int step, CvSize size ), (src1, step1, src2, step2, dst, step, size) ) \
{ \
    for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
    { \
        int i = 0; \
 \
        if( (((size_t)src1 | (size_t)src2 | (size_t)dst) & 3) == 0 ) \
        { \
            for( ; i <= size.width - 16; i += 16 ) \
            { \
                int t0 = __op__(((const int*)(src1+i))[0], ((const int*)(src2+i))[0]); \
                int t1 = __op__(((const int*)(src1+i))[1], ((const int*)(src2+i))[1]); \
 \
                ((int*)(dst+i))[0] = t0; \
                ((int*)(dst+i))[1] = t1; \
 \
                t0 = __op__(((const int*)(src1+i))[2], ((const int*)(src2+i))[2]); \
                t1 = __op__(((const int*)(src1+i))[3], ((const int*)(src2+i))[3]); \
 \
                ((int*)(dst+i))[2] = t0; \
                ((int*)(dst+i))[3] = t1; \
            } \
 \
            for( ; i <= size.width - 4; i += 4 ) \
            { \
                int t = __op__(*(const int*)(src1+i), *(const int*)(src2+i)); \
                *(int*)(dst+i) = t; \
            } \
        } \
 \
        for( ; i < size.width; i++ ) \
        { \
            int t = __op__(((const uchar*)src1)[i],((const uchar*)src2)[i]); \
            dst[i] = (uchar)t; \
        } \
    } \
 \
    return CV_OK; \
}
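
/* The macro above stamps out one low-level binary function per operation; for example
   ICV_DEF_BIN_LOG_OP_2D( CV_AND, And ) produces icvAnd_8u_C1R.  A simplified sketch of
   what the generated body does (hypothetical helper, not part of this file), without
   the word-at-a-time unrolling:

       static CvStatus icvAnd_8u_C1R_reference( const uchar* src1, int step1,
                                                const uchar* src2, int step2,
                                                uchar* dst, int step, CvSize size )
       {
           for( ; size.height--; src1 += step1, src2 += step2, dst += step )
           {
               int i;
               for( i = 0; i < size.width; i++ )
                   dst[i] = (uchar)(src1[i] & src2[i]);   // per-byte op
           }
           return CV_OK;
       }

   The real macro processes 16 bytes per iteration as four 32-bit words whenever src1,
   src2 and dst are all 4-byte aligned, and falls back to byte-wise processing for the
   unaligned case and for the row tail. */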

/* //////////////////////////////////////////////////////////////////////////////////////
                                     Mat op Scalar
////////////////////////////////////////////////////////////////////////////////////// */


#define ICV_DEF_UN_LOG_OP_2D( __op__, name ) \
static CvStatus CV_STDCALL icv##name##_8u_CnR \
( const uchar* src0, int step1, uchar* dst0, int step, CvSize size, \
  const uchar* scalar, int pix_size ) \
{ \
    int delta = 12*pix_size; \
 \
    for( ; size.height--; src0 += step1, dst0 += step ) \
    { \
        const uchar* src = (const uchar*)src0; \
        uchar* dst = dst0; \
        int i, len = size.width; \
 \
        if( (((size_t)src|(size_t)dst) & 3) == 0 ) \
        { \
            while( (len -= delta) >= 0 ) \
            { \
                for( i = 0; i < (delta); i += 12 ) \
                { \
                    int t0 = __op__(((const int*)(src+i))[0], ((const int*)(scalar+i))[0]); \
                    int t1 = __op__(((const int*)(src+i))[1], ((const int*)(scalar+i))[1]); \
                    ((int*)(dst+i))[0] = t0; \
                    ((int*)(dst+i))[1] = t1; \
 \
                    t0 = __op__(((const int*)(src+i))[2], ((const int*)(scalar+i))[2]); \
                    ((int*)(dst+i))[2] = t0; \
                } \
                src += delta; \
                dst += delta; \
            } \
        } \
        else \
        { \
            while( (len -= delta) >= 0 ) \
            { \
                for( i = 0; i < (delta); i += 4 ) \
                { \
                    int t0 = __op__(src[i], scalar[i]); \
                    int t1 = __op__(src[i+1], scalar[i+1]); \
                    dst[i] = (uchar)t0; \
                    dst[i+1] = (uchar)t1; \
 \
                    t0 = __op__(src[i+2], scalar[i+2]); \
                    t1 = __op__(src[i+3], scalar[i+3]); \
                    dst[i+2] = (uchar)t0; \
                    dst[i+3] = (uchar)t1; \
                } \
                src += delta; \
                dst += delta; \
            } \
        } \
 \
        for( len += delta, i = 0; i < len; i++ ) \
        { \
            int t = __op__(src[i],scalar[i]); \
            dst[i] = (uchar)t; \
        } \
    } \
 \
    return CV_OK; \
}
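
/* The per-scalar functions generated above (icvXorC_8u_CnR, icvAndC_8u_CnR, ...) expect
   `scalar` to point at a pre-expanded, repeating raw-byte pattern of the scalar value;
   the wrappers below build it with cvScalarToRawData(..., extend_to_12 = 1) so the inner
   loop can consume delta = 12*pix_size bytes per iteration without wrapping around the
   pattern.  A hypothetical illustration (not part of this file) of preparing such a
   buffer for an 8-bit, 3-channel scalar:

       double buf[12];                              // raw pattern storage, as in icvLogicS
       CvScalar s = cvScalar( 0x0f, 0xf0, 0xff, 0 );
       cvScalarToRawData( &s, buf, CV_8UC3, 1 );    // 0f f0 ff pattern, replicated
*/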

/////////////////////////////////////////////////////////////////////////////////////////
//                                                                                       //
//                                  LOGIC OPERATIONS                                     //
//                                                                                       //
/////////////////////////////////////////////////////////////////////////////////////////

static void
icvLogicS( const void* srcarr, CvScalar* scalar, void* dstarr,
           const void* maskarr, CvFunc2D_2A1P1I fn_2d )
{
    uchar* buffer = 0;
    int local_alloc = 1;

    CV_FUNCNAME( "icvLogicS" );

    __BEGIN__;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvMat maskstub, *mask = (CvMat*)maskarr;
    CvMat dstbuf, *tdst;
    CvCopyMaskFunc copym_func = 0;

    int y, dy;
    int coi1 = 0, coi2 = 0;
    int is_nd = 0, cont_flag = 0;
    int elem_size, elem_size1, type, depth;
    double buf[12];
    CvSize size, tsize;
    int src_step, dst_step, tdst_step, mask_step;

    if( !CV_IS_MAT(src))
    {
        if( CV_IS_MATND(src) )
            is_nd = 1;
        else
            CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    }

    if( !CV_IS_MAT(dst))
    {
        if( CV_IS_MATND(dst) )
            is_nd = 1;
        else
            CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));
    }

    if( is_nd )
    {
        CvArr* arrs[] = { src, dst };
        CvMatND stubs[2];
        CvNArrayIterator iterator;

        if( maskarr )
            CV_ERROR( CV_StsBadMask,
            "This operation on multi-dimensional arrays does not support mask" );

        CV_CALL( cvInitNArrayIterator( 2, arrs, 0, stubs, &iterator ));

        type = CV_MAT_TYPE(iterator.hdr[0]->type);
        depth = CV_MAT_DEPTH(type);
        iterator.size.width *= CV_ELEM_SIZE(type);
        elem_size1 = CV_ELEM_SIZE1(depth);

        CV_CALL( cvScalarToRawData( scalar, buf, type, 1 ));

        do
        {
            IPPI_CALL( fn_2d( iterator.ptr[0], CV_STUB_STEP,
                              iterator.ptr[1], CV_STUB_STEP,
                              iterator.size, buf, elem_size1 ));
        }
        while( cvNextNArraySlice( &iterator ));
        EXIT;
    }

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_TYPES_EQ( src, dst ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_SIZES_EQ( src, dst ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    size = cvGetMatSize( src );
    type = CV_MAT_TYPE(src->type);
    depth = CV_MAT_DEPTH(type);
    elem_size = CV_ELEM_SIZE(type);
    elem_size1 = CV_ELEM_SIZE1(depth);

    if( !mask )
    {
        cont_flag = CV_IS_MAT_CONT( src->type & dst->type );
        dy = size.height;
        tdst = dst;
    }
    else
    {
        int buf_size;

        if( !CV_IS_MAT(mask) )
            CV_CALL( mask = cvGetMat( mask, &maskstub ));

        if( !CV_IS_MASK_ARR(mask))
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( mask, dst ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        cont_flag = CV_IS_MAT_CONT( src->type & dst->type & mask->type );
        dy = CV_MAX_LOCAL_SIZE/(elem_size*size.height);
        dy = MAX(dy,1);
        dy = MIN(dy,size.height);
        dstbuf = cvMat( dy, size.width, type );
        if( !cont_flag )
            dstbuf.step = cvAlign( dstbuf.step, 8 );
        buf_size = dstbuf.step ? dstbuf.step*dy : size.width*elem_size;
        if( buf_size > CV_MAX_LOCAL_SIZE )
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
            local_alloc = 0;
        }
        else
            buffer = (uchar*)cvStackAlloc( buf_size );
        dstbuf.data.ptr = buffer;
        tdst = &dstbuf;

        copym_func = icvGetCopyMaskFunc( elem_size );
    }

    src_step = src->step;
    dst_step = dst->step;
    tdst_step = tdst->step;
    mask_step = mask ? mask->step : 0;
    CV_CALL( cvScalarToRawData( scalar, buf, type, 1 ));

    for( y = 0; y < size.height; y += dy )
    {
        tsize.width = size.width;
        tsize.height = dy;
        if( y + dy > size.height )
            tsize.height = size.height - y;
        if( cont_flag || tsize.height == 1 )
        {
            tsize.width *= tsize.height;
            tsize.height = 1;
            src_step = tdst_step = dst_step = mask_step = CV_STUB_STEP;
        }
        IPPI_CALL( fn_2d( src->data.ptr + y*src->step, src_step, tdst->data.ptr, tdst_step,
                          cvSize(tsize.width*elem_size, tsize.height), buf, elem_size1 ));
        if( mask )
        {
            IPPI_CALL( copym_func( tdst->data.ptr, tdst_step, dst->data.ptr + y*dst->step,
                                   dst_step, tsize, mask->data.ptr + y*mask->step, mask_step ));
        }
    }

    __END__;

    if( !local_alloc )
        cvFree( &buffer );
}
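
/* icvLogicS above and icvLogic below share the same masking strategy: when a mask is
   supplied, the result is first written to a temporary strip buffer `dstbuf` of `dy`
   rows and then merged into `dst` by the CvCopyMaskFunc, so only mask-selected pixels
   are updated.  The strip buffer lives on the stack (cvStackAlloc) when it fits within
   CV_MAX_LOCAL_SIZE and is heap-allocated (cvAlloc/cvFree) otherwise.  Without a mask,
   the operation writes straight into `dst` in a single pass over the whole matrix. */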

static void
icvLogic( const void* srcarr1, const void* srcarr2, void* dstarr,
          const void* maskarr, CvFunc2D_3A fn_2d )
{
    uchar* buffer = 0;
    int local_alloc = 1;

    CV_FUNCNAME( "icvLogic" );

    __BEGIN__;

    int y, dy;
    int coi1 = 0, coi2 = 0, coi3 = 0;
    int type, elem_size;
    int is_nd = 0, cont_flag = 0;
    CvMat srcstub1, *src1 = (CvMat*)srcarr1;
    CvMat srcstub2, *src2 = (CvMat*)srcarr2;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvMat maskstub, *mask = (CvMat*)maskarr;
    CvMat dstbuf, *tdst;
    int src1_step, src2_step, tdst_step, dst_step, mask_step;
    CvSize size, tsize;
    CvCopyMaskFunc copym_func = 0;

    if( !CV_IS_MAT(src1))
    {
        if( CV_IS_MATND(src1) )
            is_nd = 1;
        else
            CV_CALL( src1 = cvGetMat( src1, &srcstub1, &coi1 ));
    }

    if( !CV_IS_MAT(src2))
    {
        if( CV_IS_MATND(src2) )
            is_nd = 1;
        else
            CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
    }

    if( !CV_IS_MAT(dst))
    {
        if( CV_IS_MATND(dst) )
            is_nd = 1;
        else
            CV_CALL( dst = cvGetMat( dst, &dststub, &coi3 ));
    }

    if( is_nd )
    {
        CvArr* arrs[] = { src1, src2, dst };
        CvMatND stubs[3];
        CvNArrayIterator iterator;

        if( maskarr )
            CV_ERROR( CV_StsBadMask,
            "This operation on multi-dimensional arrays does not support mask" );

        CV_CALL( cvInitNArrayIterator( 3, arrs, 0, stubs, &iterator ));

        type = CV_MAT_TYPE(iterator.hdr[0]->type);
        iterator.size.width *= CV_ELEM_SIZE(type);

        do
        {
            IPPI_CALL( fn_2d( iterator.ptr[0], CV_STUB_STEP,
                              iterator.ptr[1], CV_STUB_STEP,
                              iterator.ptr[2], CV_STUB_STEP,
                              iterator.size ));
        }
        while( cvNextNArraySlice( &iterator ));
        EXIT;
    }

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR_FROM_CODE( CV_BadCOI );

    if( !CV_ARE_TYPES_EQ( src1, src2 ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_SIZES_EQ( src1, src2 ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    if( !CV_ARE_TYPES_EQ( src1, dst ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_SIZES_EQ( src1, dst ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    size = cvGetMatSize( src1 );
    type = CV_MAT_TYPE( src1->type );
    elem_size = CV_ELEM_SIZE(type);

    if( !mask )
    {
        cont_flag = CV_IS_MAT_CONT( src1->type & src2->type & dst->type );
        dy = size.height;
        tdst = dst;
    }
    else
    {
        int buf_size;

        if( !CV_IS_MAT(mask) )
            CV_CALL( mask = cvGetMat( mask, &maskstub ));

        if( !CV_IS_MASK_ARR(mask))
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( mask, dst ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        cont_flag = CV_IS_MAT_CONT( src1->type & src2->type & dst->type & mask->type );
        dy = CV_MAX_LOCAL_SIZE/(elem_size*size.height);
        dy = MAX(dy,1);
        dy = MIN(dy,size.height);
        dstbuf = cvMat( dy, size.width, type );
        if( !cont_flag )
            dstbuf.step = cvAlign( dstbuf.step, 8 );
        buf_size = dstbuf.step ? dstbuf.step*dy : size.width*elem_size;
        if( buf_size > CV_MAX_LOCAL_SIZE )
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
            local_alloc = 0;
        }
        else
            buffer = (uchar*)cvStackAlloc( buf_size );
        dstbuf.data.ptr = buffer;
        tdst = &dstbuf;

        copym_func = icvGetCopyMaskFunc( elem_size );
    }

    src1_step = src1->step;
    src2_step = src2->step;
    dst_step = dst->step;
    tdst_step = tdst->step;
    mask_step = mask ? mask->step : 0;

    for( y = 0; y < size.height; y += dy )
    {
        tsize.width = size.width;
        tsize.height = dy;
        if( y + dy > size.height )
            tsize.height = size.height - y;
        if( cont_flag || tsize.height == 1 )
        {
            tsize.width *= tsize.height;
            tsize.height = 1;
            src1_step = src2_step = tdst_step = dst_step = mask_step = CV_STUB_STEP;
        }
        IPPI_CALL( fn_2d( src1->data.ptr + y*src1->step, src1_step,
                          src2->data.ptr + y*src2->step, src2_step,
                          tdst->data.ptr, tdst_step,
                          cvSize(tsize.width*elem_size, tsize.height) ));
        if( mask )
        {
            IPPI_CALL( copym_func( tdst->data.ptr, tdst_step, dst->data.ptr + y*dst->step,
                                   dst_step, tsize, mask->data.ptr + y*mask->step, mask_step ));
        }
    }

    __END__;

    if( !local_alloc )
        cvFree( &buffer );
}
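
/* Note on the continuity fast path used by both wrappers: when every array involved is
   stored continuously (CV_IS_MAT_CONT) or the current strip is a single row, the strip
   is reshaped into one long row (width *= height, height = 1) and the dummy CV_STUB_STEP
   stride is passed down, so the low-level 8u functions run one unbroken inner loop over
   width*elem_size bytes instead of iterating row by row. */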

ICV_DEF_BIN_LOG_OP_2D( CV_XOR, Xor )
ICV_DEF_UN_LOG_OP_2D( CV_XOR, XorC )

ICV_DEF_BIN_LOG_OP_2D( CV_AND, And )
ICV_DEF_UN_LOG_OP_2D( CV_AND, AndC )

ICV_DEF_BIN_LOG_OP_2D( CV_OR, Or )
ICV_DEF_UN_LOG_OP_2D( CV_OR, OrC )


/////////////////////////////////////////////////////////////////////////////////////////
//                                       X O R                                           //
/////////////////////////////////////////////////////////////////////////////////////////

CV_IMPL void
cvXorS( const void* src, CvScalar scalar, void* dst, const void* mask )
{
    icvLogicS( src, &scalar, dst, mask, (CvFunc2D_2A1P1I)icvXorC_8u_CnR );
}


CV_IMPL void
cvXor( const void* src1, const void* src2, void* dst, const void* mask )
{
    icvLogic( src1, src2, dst, mask, (CvFunc2D_3A)icvXor_8u_C1R );
}

/////////////////////////////////////////////////////////////////////////////////////////
//                                       A N D                                           //
/////////////////////////////////////////////////////////////////////////////////////////

CV_IMPL void
cvAndS( const void* src, CvScalar scalar, void* dst, const void* mask )
{
    icvLogicS( src, &scalar, dst, mask, (CvFunc2D_2A1P1I)icvAndC_8u_CnR );
}


CV_IMPL void
cvAnd( const void* src1, const void* src2, void* dst, const void* mask )
{
    icvLogic( src1, src2, dst, mask, (CvFunc2D_3A)icvAnd_8u_C1R );
}
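
/* Usage sketch for the public entry points (illustrative only, not part of the library):

       CvMat* a    = cvCreateMat( 100, 100, CV_8UC1 );
       CvMat* b    = cvCreateMat( 100, 100, CV_8UC1 );
       CvMat* c    = cvCreateMat( 100, 100, CV_8UC1 );
       CvMat* mask = cvCreateMat( 100, 100, CV_8UC1 );   // 8-bit mask, non-zero = copy

       // ... fill a, b and mask ...

       cvAnd( a, b, c, mask );                  // c = a & b wherever mask != 0
       cvXorS( a, cvScalarAll( 0xff ), a, 0 );  // toggle all bits of a, no mask

       cvReleaseMat( &a ); cvReleaseMat( &b );
       cvReleaseMat( &c ); cvReleaseMat( &mask );

   All of these operate bitwise on the raw element bytes, so they accept any matrix type
   as long as the source(s) and destination have identical type and size. */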

/////////////////////////////////////////////////////////////////////////////////////////
//                                        O R                                            //
/////////////////////////////////////////////////////////////////////////////////////////

CV_IMPL void
cvOrS( const void* src, CvScalar scalar, void* dst, const void* mask )
{
    icvLogicS( src, &scalar, dst, mask, (CvFunc2D_2A1P1I)icvOrC_8u_CnR );
}


CV_IMPL void
cvOr( const void* src1, const void* src2, void* dst, const void* mask )
{
    icvLogic( src1, src2, dst, mask, (CvFunc2D_3A)icvOr_8u_C1R );
}


/////////////////////////////////////////////////////////////////////////////////////////
//                                       N O T                                           //
/////////////////////////////////////////////////////////////////////////////////////////


IPCVAPI_IMPL( CvStatus, icvNot_8u_C1R,
( const uchar* src1, int step1, uchar* dst, int step, CvSize size ),
(src1, step1, dst, step, size) )
{
    for( ; size.height--; src1 += step1, dst += step )
    {
        int i = 0;

        if( (((size_t)src1 | (size_t)dst) & 3) == 0 )
        {
            for( ; i <= size.width - 16; i += 16 )
            {
                int t0 = ~((const int*)(src1+i))[0];
                int t1 = ~((const int*)(src1+i))[1];

                ((int*)(dst+i))[0] = t0;
                ((int*)(dst+i))[1] = t1;

                t0 = ~((const int*)(src1+i))[2];
                t1 = ~((const int*)(src1+i))[3];

                ((int*)(dst+i))[2] = t0;
                ((int*)(dst+i))[3] = t1;
            }

            for( ; i <= size.width - 4; i += 4 )
            {
                int t = ~*(const int*)(src1+i);
                *(int*)(dst+i) = t;
            }
        }

        for( ; i < size.width; i++ )
        {
            int t = ~((const uchar*)src1)[i];
            dst[i] = (uchar)t;
        }
    }

    return CV_OK;
}


CV_IMPL void
cvNot( const void* srcarr, void* dstarr )
{
    CV_FUNCNAME( "cvNot" );

    __BEGIN__;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;

    int coi1 = 0, coi2 = 0;
    int type, is_nd = 0;
    CvSize size;
    int src_step, dst_step;

    if( !CV_IS_MAT(src))
    {
        if( CV_IS_MATND(src) )
            is_nd = 1;
        else
            CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    }

    if( !CV_IS_MAT(dst))
    {
        if( CV_IS_MATND(dst) )
            is_nd = 1;
        else
            CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));
    }

    if( is_nd )
    {
        CvArr* arrs[] = { src, dst };
        CvMatND stubs[2];
        CvNArrayIterator iterator;

        CV_CALL( cvInitNArrayIterator( 2, arrs, 0, stubs, &iterator ));

        type = CV_MAT_TYPE(iterator.hdr[0]->type);
        iterator.size.width *= CV_ELEM_SIZE(type);

        do
        {
            IPPI_CALL( icvNot_8u_C1R( iterator.ptr[0], CV_STUB_STEP,
                                      iterator.ptr[1], CV_STUB_STEP,
                                      iterator.size ));
        }
        while( cvNextNArraySlice( &iterator ));
        EXIT;
    }

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_TYPES_EQ( src, dst ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_SIZES_EQ( src, dst ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    size = cvGetMatSize( src );
    src_step = src->step;
    dst_step = dst->step;

    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        size.width *= size.height;
        src_step = dst_step = CV_STUB_STEP;
        size.height = 1;
    }

    type = CV_MAT_TYPE( src->type );
    size.width *= CV_ELEM_SIZE(type);

    IPPI_CALL( icvNot_8u_C1R( src->data.ptr, src_step, dst->data.ptr, dst_step, size ));

    __END__;
}

/* End of file. */