1 /*M/////////////////////////////////////////////////////////////////////////////////////// 2 // 3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 4 // 5 // By downloading, copying, installing or using the software you agree to this license. 6 // If you do not agree to this license, do not download, install, 7 // copy or use the software. 8 // 9 // 10 // Intel License Agreement 11 // For Open Source Computer Vision Library 12 // 13 // Copyright (C) 2000, Intel Corporation, all rights reserved. 14 // Third party copyrights are property of their respective owners. 15 // 16 // Redistribution and use in source and binary forms, with or without modification, 17 // are permitted provided that the following conditions are met: 18 // 19 // * Redistribution's of source code must retain the above copyright notice, 20 // this list of conditions and the following disclaimer. 21 // 22 // * Redistribution's in binary form must reproduce the above copyright notice, 23 // this list of conditions and the following disclaimer in the documentation 24 // and/or other materials provided with the distribution. 25 // 26 // * The name of Intel Corporation may not be used to endorse or promote products 27 // derived from this software without specific prior written permission. 28 // 29 // This software is provided by the copyright holders and contributors "as is" and 30 // any express or implied warranties, including, but not limited to, the implied 31 // warranties of merchantability and fitness for a particular purpose are disclaimed. 32 // In no event shall the Intel Corporation or contributors be liable for any direct, 33 // indirect, incidental, special, exemplary, or consequential damages 34 // (including, but not limited to, procurement of substitute goods or services; 35 // loss of use, data, or profits; or business interruption) however caused 36 // and on any theory of liability, whether in contract, strict liability, 37 // or tort (including negligence or otherwise) arising in any way out of 38 // the use of this software, even if advised of the possibility of such damage. 39 // 40 //M*/ 41 42 #include "_cxcore.h" 43 44 /****************************************************************************************\ 45 * Splitting/extracting array channels * 46 \****************************************************************************************/ 47 48 #define ICV_DEF_PX2PL2PX_ENTRY_C2( arrtype_ptr, ptr ) \ 49 arrtype_ptr plane0 = ptr[0]; \ 50 arrtype_ptr plane1 = ptr[1]; 51 52 #define ICV_DEF_PX2PL2PX_ENTRY_C3( arrtype_ptr, ptr ) \ 53 arrtype_ptr plane0 = ptr[0]; \ 54 arrtype_ptr plane1 = ptr[1]; \ 55 arrtype_ptr plane2 = ptr[2]; 56 57 #define ICV_DEF_PX2PL2PX_ENTRY_C4( arrtype_ptr, ptr ) \ 58 arrtype_ptr plane0 = ptr[0]; \ 59 arrtype_ptr plane1 = ptr[1]; \ 60 arrtype_ptr plane2 = ptr[2]; \ 61 arrtype_ptr plane3 = ptr[3]; 62 63 64 #define ICV_DEF_PX2PL_C2( arrtype, len ) \ 65 { \ 66 int j; \ 67 \ 68 for( j = 0; j < (len); j++, (src) += 2 ) \ 69 { \ 70 arrtype t0 = (src)[0]; \ 71 arrtype t1 = (src)[1]; \ 72 \ 73 plane0[j] = t0; \ 74 plane1[j] = t1; \ 75 } \ 76 plane0 += dststep; \ 77 plane1 += dststep; \ 78 } 79 80 81 #define ICV_DEF_PX2PL_C3( arrtype, len ) \ 82 { \ 83 int j; \ 84 \ 85 for( j = 0; j < (len); j++, (src) += 3 ) \ 86 { \ 87 arrtype t0 = (src)[0]; \ 88 arrtype t1 = (src)[1]; \ 89 arrtype t2 = (src)[2]; \ 90 \ 91 plane0[j] = t0; \ 92 plane1[j] = t1; \ 93 plane2[j] = t2; \ 94 } \ 95 plane0 += dststep; \ 96 plane1 += dststep; \ 97 plane2 += dststep; \ 98 } 99 100 101 #define ICV_DEF_PX2PL_C4( arrtype, len ) \ 102 { \ 103 int j; \ 104 \ 105 for( j = 0; j < (len); j++, (src) += 4 ) \ 106 { \ 107 arrtype t0 = (src)[0]; \ 108 arrtype t1 = (src)[1]; \ 109 \ 110 plane0[j] = t0; \ 111 plane1[j] = t1; \ 112 \ 113 t0 = (src)[2]; \ 114 t1 = (src)[3]; \ 115 \ 116 plane2[j] = t0; \ 117 plane3[j] = t1; \ 118 } \ 119 plane0 += dststep; \ 120 plane1 += dststep; \ 121 plane2 += dststep; \ 122 plane3 += dststep; \ 123 } 124 125 126 #define ICV_DEF_PX2PL_COI( arrtype, len, cn ) \ 127 { \ 128 int j; \ 129 \ 130 for( j = 0; j <= (len) - 4; j += 4, (src) += 4*(cn))\ 131 { \ 132 arrtype t0 = (src)[0]; \ 133 arrtype t1 = (src)[(cn)]; \ 134 \ 135 (dst)[j] = t0; \ 136 (dst)[j+1] = t1; \ 137 \ 138 t0 = (src)[(cn)*2]; \ 139 t1 = (src)[(cn)*3]; \ 140 \ 141 (dst)[j+2] = t0; \ 142 (dst)[j+3] = t1; \ 143 } \ 144 \ 145 for( ; j < (len); j++, (src) += (cn)) \ 146 { \ 147 (dst)[j] = (src)[0]; \ 148 } \ 149 } 150 151 152 #define ICV_DEF_COPY_PX2PL_FUNC_2D( arrtype, flavor, \ 153 cn, entry_macro ) \ 154 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_C##cn##P##cn##R,\ 155 ( const arrtype* src, int srcstep, \ 156 arrtype** dst, int dststep, CvSize size ), \ 157 (src, srcstep, dst, dststep, size)) \ 158 { \ 159 entry_macro(arrtype*, dst); \ 160 srcstep /= sizeof(src[0]); \ 161 dststep /= sizeof(dst[0][0]); \ 162 \ 163 for( ; size.height--; src += srcstep ) \ 164 { \ 165 ICV_DEF_PX2PL_C##cn( arrtype, size.width ); \ 166 src -= size.width*(cn); \ 167 } \ 168 \ 169 return CV_OK; \ 170 } 171 172 173 #define ICV_DEF_COPY_PX2PL_FUNC_2D_COI( arrtype, flavor )\ 174 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_CnC1CR, \ 175 ( const arrtype* src, int srcstep, arrtype* dst, int dststep,\ 176 CvSize size, int cn, int coi ), \ 177 (src, srcstep, dst, dststep, size, cn, coi)) \ 178 { \ 179 src += coi - 1; \ 180 srcstep /= sizeof(src[0]); \ 181 dststep /= sizeof(dst[0]); \ 182 \ 183 for( ; size.height--; src += srcstep, dst += dststep )\ 184 { \ 185 ICV_DEF_PX2PL_COI( arrtype, size.width, cn ); \ 186 src -= size.width*(cn); \ 187 } \ 188 \ 189 return CV_OK; \ 190 } 191 192 193 ICV_DEF_COPY_PX2PL_FUNC_2D( uchar, 8u, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 194 ICV_DEF_COPY_PX2PL_FUNC_2D( uchar, 8u, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 195 ICV_DEF_COPY_PX2PL_FUNC_2D( uchar, 8u, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 196 ICV_DEF_COPY_PX2PL_FUNC_2D( ushort, 16s, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 197 ICV_DEF_COPY_PX2PL_FUNC_2D( ushort, 16s, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 198 ICV_DEF_COPY_PX2PL_FUNC_2D( ushort, 16s, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 199 ICV_DEF_COPY_PX2PL_FUNC_2D( int, 32f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 200 ICV_DEF_COPY_PX2PL_FUNC_2D( int, 32f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 201 ICV_DEF_COPY_PX2PL_FUNC_2D( int, 32f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 202 ICV_DEF_COPY_PX2PL_FUNC_2D( int64, 64f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 203 ICV_DEF_COPY_PX2PL_FUNC_2D( int64, 64f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 204 ICV_DEF_COPY_PX2PL_FUNC_2D( int64, 64f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 205 206 207 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( uchar, 8u ) 208 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( ushort, 16s ) 209 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( int, 32f ) 210 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( int64, 64f ) 211 212 213 /****************************************************************************************\ 214 * Merging/inserting array channels * 215 \****************************************************************************************/ 216 217 218 #define ICV_DEF_PL2PX_C2( arrtype, len ) \ 219 { \ 220 int j; \ 221 \ 222 for( j = 0; j < (len); j++, (dst) += 2 )\ 223 { \ 224 arrtype t0 = plane0[j]; \ 225 arrtype t1 = plane1[j]; \ 226 \ 227 dst[0] = t0; \ 228 dst[1] = t1; \ 229 } \ 230 plane0 += srcstep; \ 231 plane1 += srcstep; \ 232 } 233 234 235 #define ICV_DEF_PL2PX_C3( arrtype, len ) \ 236 { \ 237 int j; \ 238 \ 239 for( j = 0; j < (len); j++, (dst) += 3 )\ 240 { \ 241 arrtype t0 = plane0[j]; \ 242 arrtype t1 = plane1[j]; \ 243 arrtype t2 = plane2[j]; \ 244 \ 245 dst[0] = t0; \ 246 dst[1] = t1; \ 247 dst[2] = t2; \ 248 } \ 249 plane0 += srcstep; \ 250 plane1 += srcstep; \ 251 plane2 += srcstep; \ 252 } 253 254 255 #define ICV_DEF_PL2PX_C4( arrtype, len ) \ 256 { \ 257 int j; \ 258 \ 259 for( j = 0; j < (len); j++, (dst) += 4 )\ 260 { \ 261 arrtype t0 = plane0[j]; \ 262 arrtype t1 = plane1[j]; \ 263 \ 264 dst[0] = t0; \ 265 dst[1] = t1; \ 266 \ 267 t0 = plane2[j]; \ 268 t1 = plane3[j]; \ 269 \ 270 dst[2] = t0; \ 271 dst[3] = t1; \ 272 } \ 273 plane0 += srcstep; \ 274 plane1 += srcstep; \ 275 plane2 += srcstep; \ 276 plane3 += srcstep; \ 277 } 278 279 280 #define ICV_DEF_PL2PX_COI( arrtype, len, cn ) \ 281 { \ 282 int j; \ 283 \ 284 for( j = 0; j <= (len) - 4; j += 4, (dst) += 4*(cn))\ 285 { \ 286 arrtype t0 = (src)[j]; \ 287 arrtype t1 = (src)[j+1]; \ 288 \ 289 (dst)[0] = t0; \ 290 (dst)[(cn)] = t1; \ 291 \ 292 t0 = (src)[j+2]; \ 293 t1 = (src)[j+3]; \ 294 \ 295 (dst)[(cn)*2] = t0; \ 296 (dst)[(cn)*3] = t1; \ 297 } \ 298 \ 299 for( ; j < (len); j++, (dst) += (cn)) \ 300 { \ 301 (dst)[0] = (src)[j]; \ 302 } \ 303 } 304 305 306 #define ICV_DEF_COPY_PL2PX_FUNC_2D( arrtype, flavor, cn, entry_macro ) \ 307 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_P##cn##C##cn##R, \ 308 ( const arrtype** src, int srcstep, \ 309 arrtype* dst, int dststep, CvSize size ), \ 310 (src, srcstep, dst, dststep, size)) \ 311 { \ 312 entry_macro(const arrtype*, src); \ 313 srcstep /= sizeof(src[0][0]); \ 314 dststep /= sizeof(dst[0]); \ 315 \ 316 for( ; size.height--; dst += dststep ) \ 317 { \ 318 ICV_DEF_PL2PX_C##cn( arrtype, size.width ); \ 319 dst -= size.width*(cn); \ 320 } \ 321 \ 322 return CV_OK; \ 323 } 324 325 326 #define ICV_DEF_COPY_PL2PX_FUNC_2D_COI( arrtype, flavor ) \ 327 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_C1CnCR, \ 328 ( const arrtype* src, int srcstep, \ 329 arrtype* dst, int dststep, \ 330 CvSize size, int cn, int coi ), \ 331 (src, srcstep, dst, dststep, size, cn, coi)) \ 332 { \ 333 dst += coi - 1; \ 334 srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]); \ 335 \ 336 for( ; size.height--; src += srcstep, dst += dststep ) \ 337 { \ 338 ICV_DEF_PL2PX_COI( arrtype, size.width, cn ); \ 339 dst -= size.width*(cn); \ 340 } \ 341 \ 342 return CV_OK; \ 343 } 344 345 346 ICV_DEF_COPY_PL2PX_FUNC_2D( uchar, 8u, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 347 ICV_DEF_COPY_PL2PX_FUNC_2D( uchar, 8u, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 348 ICV_DEF_COPY_PL2PX_FUNC_2D( uchar, 8u, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 349 ICV_DEF_COPY_PL2PX_FUNC_2D( ushort, 16s, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 350 ICV_DEF_COPY_PL2PX_FUNC_2D( ushort, 16s, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 351 ICV_DEF_COPY_PL2PX_FUNC_2D( ushort, 16s, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 352 ICV_DEF_COPY_PL2PX_FUNC_2D( int, 32f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 353 ICV_DEF_COPY_PL2PX_FUNC_2D( int, 32f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 354 ICV_DEF_COPY_PL2PX_FUNC_2D( int, 32f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 355 ICV_DEF_COPY_PL2PX_FUNC_2D( int64, 64f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 ) 356 ICV_DEF_COPY_PL2PX_FUNC_2D( int64, 64f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 ) 357 ICV_DEF_COPY_PL2PX_FUNC_2D( int64, 64f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 ) 358 359 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( uchar, 8u ) 360 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( ushort, 16s ) 361 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( int, 32f ) 362 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( int64, 64f ) 363 364 365 #define ICV_DEF_PXPLPX_TAB( name, FROM, TO ) \ 366 static void \ 367 name( CvBigFuncTable* tab ) \ 368 { \ 369 tab->fn_2d[CV_8UC2] = (void*)icvCopy##_8u_##FROM##2##TO##2R; \ 370 tab->fn_2d[CV_8UC3] = (void*)icvCopy##_8u_##FROM##3##TO##3R; \ 371 tab->fn_2d[CV_8UC4] = (void*)icvCopy##_8u_##FROM##4##TO##4R; \ 372 \ 373 tab->fn_2d[CV_8SC2] = (void*)icvCopy##_8u_##FROM##2##TO##2R; \ 374 tab->fn_2d[CV_8SC3] = (void*)icvCopy##_8u_##FROM##3##TO##3R; \ 375 tab->fn_2d[CV_8SC4] = (void*)icvCopy##_8u_##FROM##4##TO##4R; \ 376 \ 377 tab->fn_2d[CV_16UC2] = (void*)icvCopy##_16s_##FROM##2##TO##2R; \ 378 tab->fn_2d[CV_16UC3] = (void*)icvCopy##_16s_##FROM##3##TO##3R; \ 379 tab->fn_2d[CV_16UC4] = (void*)icvCopy##_16s_##FROM##4##TO##4R; \ 380 \ 381 tab->fn_2d[CV_16SC2] = (void*)icvCopy##_16s_##FROM##2##TO##2R; \ 382 tab->fn_2d[CV_16SC3] = (void*)icvCopy##_16s_##FROM##3##TO##3R; \ 383 tab->fn_2d[CV_16SC4] = (void*)icvCopy##_16s_##FROM##4##TO##4R; \ 384 \ 385 tab->fn_2d[CV_32SC2] = (void*)icvCopy##_32f_##FROM##2##TO##2R; \ 386 tab->fn_2d[CV_32SC3] = (void*)icvCopy##_32f_##FROM##3##TO##3R; \ 387 tab->fn_2d[CV_32SC4] = (void*)icvCopy##_32f_##FROM##4##TO##4R; \ 388 \ 389 tab->fn_2d[CV_32FC2] = (void*)icvCopy##_32f_##FROM##2##TO##2R; \ 390 tab->fn_2d[CV_32FC3] = (void*)icvCopy##_32f_##FROM##3##TO##3R; \ 391 tab->fn_2d[CV_32FC4] = (void*)icvCopy##_32f_##FROM##4##TO##4R; \ 392 \ 393 tab->fn_2d[CV_64FC2] = (void*)icvCopy##_64f_##FROM##2##TO##2R; \ 394 tab->fn_2d[CV_64FC3] = (void*)icvCopy##_64f_##FROM##3##TO##3R; \ 395 tab->fn_2d[CV_64FC4] = (void*)icvCopy##_64f_##FROM##4##TO##4R; \ 396 } 397 398 399 400 #define ICV_DEF_PXPLCOI_TAB( name, FROM, TO ) \ 401 static void \ 402 name( CvFuncTable* tab ) \ 403 { \ 404 tab->fn_2d[CV_8U] = (void*)icvCopy##_8u_##FROM##TO##CR; \ 405 tab->fn_2d[CV_8S] = (void*)icvCopy##_8u_##FROM##TO##CR; \ 406 tab->fn_2d[CV_16U] = (void*)icvCopy##_16s_##FROM##TO##CR; \ 407 tab->fn_2d[CV_16S] = (void*)icvCopy##_16s_##FROM##TO##CR; \ 408 tab->fn_2d[CV_32S] = (void*)icvCopy##_32f_##FROM##TO##CR; \ 409 tab->fn_2d[CV_32F] = (void*)icvCopy##_32f_##FROM##TO##CR; \ 410 tab->fn_2d[CV_64F] = (void*)icvCopy##_64f_##FROM##TO##CR; \ 411 } 412 413 414 ICV_DEF_PXPLPX_TAB( icvInitSplitRTable, C, P ) 415 ICV_DEF_PXPLCOI_TAB( icvInitSplitRCoiTable, Cn, C1 ) 416 ICV_DEF_PXPLPX_TAB( icvInitCvtPlaneToPixRTable, P, C ) 417 ICV_DEF_PXPLCOI_TAB( icvInitCvtPlaneToPixRCoiTable, C1, Cn ) 418 419 typedef CvStatus (CV_STDCALL *CvSplitFunc)( const void* src, int srcstep, 420 void** dst, int dststep, CvSize size); 421 422 typedef CvStatus (CV_STDCALL *CvExtractPlaneFunc)( const void* src, int srcstep, 423 void* dst, int dststep, 424 CvSize size, int cn, int coi ); 425 426 typedef CvStatus (CV_STDCALL *CvMergeFunc)( const void** src, int srcstep, 427 void* dst, int dststep, CvSize size); 428 429 typedef CvStatus (CV_STDCALL *CvInsertPlaneFunc)( const void* src, int srcstep, 430 void* dst, int dststep, 431 CvSize size, int cn, int coi ); 432 433 CV_IMPL void 434 cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 ) 435 { 436 static CvBigFuncTable pxpl_tab; 437 static CvFuncTable pxplcoi_tab; 438 static int inittab = 0; 439 440 CV_FUNCNAME( "cvSplit" ); 441 442 __BEGIN__; 443 444 CvMat stub[5], *dst[4], *src = (CvMat*)srcarr; 445 CvSize size; 446 void* dstptr[4] = { 0, 0, 0, 0 }; 447 int type, cn, coi = 0; 448 int i, nzplanes = 0, nzidx = -1; 449 int cont_flag; 450 int src_step, dst_step = 0; 451 452 if( !inittab ) 453 { 454 icvInitSplitRTable( &pxpl_tab ); 455 icvInitSplitRCoiTable( &pxplcoi_tab ); 456 inittab = 1; 457 } 458 459 dst[0] = (CvMat*)dstarr0; 460 dst[1] = (CvMat*)dstarr1; 461 dst[2] = (CvMat*)dstarr2; 462 dst[3] = (CvMat*)dstarr3; 463 464 CV_CALL( src = cvGetMat( src, stub + 4, &coi )); 465 466 //if( coi != 0 ) 467 // CV_ERROR( CV_BadCOI, "" ); 468 469 type = CV_MAT_TYPE( src->type ); 470 cn = CV_MAT_CN( type ); 471 472 cont_flag = src->type; 473 474 if( cn == 1 ) 475 CV_ERROR( CV_BadNumChannels, "" ); 476 477 for( i = 0; i < 4; i++ ) 478 { 479 if( dst[i] ) 480 { 481 nzplanes++; 482 nzidx = i; 483 CV_CALL( dst[i] = cvGetMat( dst[i], stub + i )); 484 if( CV_MAT_CN( dst[i]->type ) != 1 ) 485 CV_ERROR( CV_BadNumChannels, "" ); 486 if( !CV_ARE_DEPTHS_EQ( dst[i], src )) 487 CV_ERROR( CV_StsUnmatchedFormats, "" ); 488 if( !CV_ARE_SIZES_EQ( dst[i], src )) 489 CV_ERROR( CV_StsUnmatchedSizes, "" ); 490 if( nzplanes > i && i > 0 && dst[i]->step != dst[i-1]->step ) 491 CV_ERROR( CV_BadStep, "" ); 492 dst_step = dst[i]->step; 493 dstptr[nzplanes-1] = dst[i]->data.ptr; 494 495 cont_flag &= dst[i]->type; 496 } 497 } 498 499 src_step = src->step; 500 size = cvGetMatSize( src ); 501 502 if( CV_IS_MAT_CONT( cont_flag )) 503 { 504 size.width *= size.height; 505 src_step = dst_step = CV_STUB_STEP; 506 507 size.height = 1; 508 } 509 510 if( nzplanes == cn ) 511 { 512 CvSplitFunc func = (CvSplitFunc)pxpl_tab.fn_2d[type]; 513 514 if( !func ) 515 CV_ERROR( CV_StsUnsupportedFormat, "" ); 516 517 IPPI_CALL( func( src->data.ptr, src_step, dstptr, dst_step, size )); 518 } 519 else if( nzplanes == 1 ) 520 { 521 CvExtractPlaneFunc func = (CvExtractPlaneFunc)pxplcoi_tab.fn_2d[CV_MAT_DEPTH(type)]; 522 523 if( !func ) 524 CV_ERROR( CV_StsUnsupportedFormat, "" ); 525 526 IPPI_CALL( func( src->data.ptr, src_step, 527 dst[nzidx]->data.ptr, dst_step, 528 size, cn, nzidx + 1 )); 529 } 530 else 531 { 532 CV_ERROR( CV_StsBadArg, 533 "Either all output planes or only one output plane should be non zero" ); 534 } 535 536 __END__; 537 } 538 539 540 541 CV_IMPL void 542 cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2, 543 const void* srcarr3, void* dstarr ) 544 { 545 static CvBigFuncTable plpx_tab; 546 static CvFuncTable plpxcoi_tab; 547 static int inittab = 0; 548 549 CV_FUNCNAME( "cvMerge" ); 550 551 __BEGIN__; 552 553 int src_step = 0, dst_step; 554 CvMat stub[5], *src[4], *dst = (CvMat*)dstarr; 555 CvSize size; 556 const void* srcptr[4] = { 0, 0, 0, 0 }; 557 int type, cn, coi = 0; 558 int i, nzplanes = 0, nzidx = -1; 559 int cont_flag; 560 561 if( !inittab ) 562 { 563 icvInitCvtPlaneToPixRTable( &plpx_tab ); 564 icvInitCvtPlaneToPixRCoiTable( &plpxcoi_tab ); 565 inittab = 1; 566 } 567 568 src[0] = (CvMat*)srcarr0; 569 src[1] = (CvMat*)srcarr1; 570 src[2] = (CvMat*)srcarr2; 571 src[3] = (CvMat*)srcarr3; 572 573 CV_CALL( dst = cvGetMat( dst, stub + 4, &coi )); 574 575 type = CV_MAT_TYPE( dst->type ); 576 cn = CV_MAT_CN( type ); 577 578 cont_flag = dst->type; 579 580 if( cn == 1 ) 581 CV_ERROR( CV_BadNumChannels, "" ); 582 583 for( i = 0; i < 4; i++ ) 584 { 585 if( src[i] ) 586 { 587 nzplanes++; 588 nzidx = i; 589 CV_CALL( src[i] = cvGetMat( src[i], stub + i )); 590 if( CV_MAT_CN( src[i]->type ) != 1 ) 591 CV_ERROR( CV_BadNumChannels, "" ); 592 if( !CV_ARE_DEPTHS_EQ( src[i], dst )) 593 CV_ERROR( CV_StsUnmatchedFormats, "" ); 594 if( !CV_ARE_SIZES_EQ( src[i], dst )) 595 CV_ERROR( CV_StsUnmatchedSizes, "" ); 596 if( nzplanes > i && i > 0 && src[i]->step != src[i-1]->step ) 597 CV_ERROR( CV_BadStep, "" ); 598 src_step = src[i]->step; 599 srcptr[nzplanes-1] = (const void*)(src[i]->data.ptr); 600 601 cont_flag &= src[i]->type; 602 } 603 } 604 605 size = cvGetMatSize( dst ); 606 dst_step = dst->step; 607 608 if( CV_IS_MAT_CONT( cont_flag )) 609 { 610 size.width *= size.height; 611 src_step = dst_step = CV_STUB_STEP; 612 size.height = 1; 613 } 614 615 if( nzplanes == cn ) 616 { 617 CvMergeFunc func = (CvMergeFunc)plpx_tab.fn_2d[type]; 618 619 if( !func ) 620 CV_ERROR( CV_StsUnsupportedFormat, "" ); 621 622 IPPI_CALL( func( srcptr, src_step, dst->data.ptr, dst_step, size )); 623 } 624 else if( nzplanes == 1 ) 625 { 626 CvInsertPlaneFunc func = (CvInsertPlaneFunc)plpxcoi_tab.fn_2d[CV_MAT_DEPTH(type)]; 627 628 if( !func ) 629 CV_ERROR( CV_StsUnsupportedFormat, "" ); 630 631 IPPI_CALL( func( src[nzidx]->data.ptr, src_step, 632 dst->data.ptr, dst_step, 633 size, cn, nzidx + 1 )); 634 } 635 else 636 { 637 CV_ERROR( CV_StsBadArg, 638 "Either all input planes or only one input plane should be non zero" ); 639 } 640 641 __END__; 642 } 643 644 645 /****************************************************************************************\ 646 * Generalized split/merge: mixing channels * 647 \****************************************************************************************/ 648 649 #define ICV_DEF_MIX_CH_FUNC_2D( arrtype, flavor ) \ 650 static CvStatus CV_STDCALL \ 651 icvMixChannels_##flavor( const arrtype** src, int* sdelta0, \ 652 int* sdelta1, arrtype** dst, \ 653 int* ddelta0, int* ddelta1, \ 654 int n, CvSize size ) \ 655 { \ 656 int i, k; \ 657 int block_size0 = n == 1 ? size.width : 1024; \ 658 \ 659 for( ; size.height--; ) \ 660 { \ 661 int remaining = size.width; \ 662 for( ; remaining > 0; ) \ 663 { \ 664 int block_size = MIN( remaining, block_size0 ); \ 665 for( k = 0; k < n; k++ ) \ 666 { \ 667 const arrtype* s = src[k]; \ 668 arrtype* d = dst[k]; \ 669 int ds = sdelta1[k], dd = ddelta1[k]; \ 670 if( s ) \ 671 { \ 672 for( i = 0; i <= block_size - 2; i += 2, \ 673 s += ds*2, d += dd*2 ) \ 674 { \ 675 arrtype t0 = s[0], t1 = s[ds]; \ 676 d[0] = t0; d[dd] = t1; \ 677 } \ 678 if( i < block_size ) \ 679 d[0] = s[0], s += ds, d += dd; \ 680 src[k] = s; \ 681 } \ 682 else \ 683 { \ 684 for( i=0; i <= block_size-2; i+=2, d+=dd*2 )\ 685 d[0] = d[dd] = 0; \ 686 if( i < block_size ) \ 687 d[0] = 0, d += dd; \ 688 } \ 689 dst[k] = d; \ 690 } \ 691 remaining -= block_size; \ 692 } \ 693 for( k = 0; k < n; k++ ) \ 694 src[k] += sdelta0[k], dst[k] += ddelta0[k]; \ 695 } \ 696 \ 697 return CV_OK; \ 698 } 699 700 701 ICV_DEF_MIX_CH_FUNC_2D( uchar, 8u ) 702 ICV_DEF_MIX_CH_FUNC_2D( ushort, 16u ) 703 ICV_DEF_MIX_CH_FUNC_2D( int, 32s ) 704 ICV_DEF_MIX_CH_FUNC_2D( int64, 64s ) 705 706 static void 707 icvInitMixChannelsTab( CvFuncTable* tab ) 708 { 709 tab->fn_2d[CV_8U] = (void*)icvMixChannels_8u; 710 tab->fn_2d[CV_8S] = (void*)icvMixChannels_8u; 711 tab->fn_2d[CV_16U] = (void*)icvMixChannels_16u; 712 tab->fn_2d[CV_16S] = (void*)icvMixChannels_16u; 713 tab->fn_2d[CV_32S] = (void*)icvMixChannels_32s; 714 tab->fn_2d[CV_32F] = (void*)icvMixChannels_32s; 715 tab->fn_2d[CV_64F] = (void*)icvMixChannels_64s; 716 } 717 718 typedef CvStatus (CV_STDCALL * CvMixChannelsFunc)( const void** src, int* sdelta0, 719 int* sdelta1, void** dst, int* ddelta0, int* ddelta1, int n, CvSize size ); 720 721 CV_IMPL void 722 cvMixChannels( const CvArr** src, int src_count, 723 CvArr** dst, int dst_count, 724 const int* from_to, int pair_count ) 725 { 726 static CvFuncTable mixcn_tab; 727 static int inittab = 0; 728 uchar* buffer = 0; 729 int heap_alloc = 0; 730 731 CV_FUNCNAME( "cvMixChannels" ); 732 733 __BEGIN__; 734 735 CvSize size = {0,0}; 736 int depth = -1, elem_size = 1; 737 int *sdelta0 = 0, *sdelta1 = 0, *ddelta0 = 0, *ddelta1 = 0; 738 uchar **sptr = 0, **dptr = 0; 739 uchar **src0 = 0, **dst0 = 0; 740 int* src_cn = 0, *dst_cn = 0; 741 int* src_step = 0, *dst_step = 0; 742 int buf_size, i, k; 743 int cont_flag = CV_MAT_CONT_FLAG; 744 CvMixChannelsFunc func; 745 746 if( !inittab ) 747 { 748 icvInitMixChannelsTab( &mixcn_tab ); 749 inittab = 1; 750 } 751 752 src_count = MAX( src_count, 0 ); 753 754 if( !src && src_count > 0 ) 755 CV_ERROR( CV_StsNullPtr, "The input array of arrays is NULL" ); 756 757 if( !dst ) 758 CV_ERROR( CV_StsNullPtr, "The output array of arrays is NULL" ); 759 760 if( dst_count <= 0 || pair_count <= 0 ) 761 CV_ERROR( CV_StsOutOfRange, 762 "The number of output arrays and the number of copied channels must be positive" ); 763 764 if( !from_to ) 765 CV_ERROR( CV_StsNullPtr, "The array of copied channel indices is NULL" ); 766 767 buf_size = (src_count + dst_count + 2)* 768 (sizeof(src0[0]) + sizeof(src_cn[0]) + sizeof(src_step[0])) + 769 pair_count*2*(sizeof(sptr[0]) + sizeof(sdelta0[0]) + sizeof(sdelta1[0])); 770 771 if( buf_size > CV_MAX_LOCAL_SIZE ) 772 { 773 CV_CALL( buffer = (uchar*)cvAlloc( buf_size ) ); 774 heap_alloc = 1; 775 } 776 else 777 buffer = (uchar*)cvStackAlloc( buf_size ); 778 779 src0 = (uchar**)buffer; 780 dst0 = src0 + src_count; 781 src_cn = (int*)(dst0 + dst_count); 782 dst_cn = src_cn + src_count + 1; 783 src_step = dst_cn + dst_count + 1; 784 dst_step = src_step + src_count; 785 786 sptr = (uchar**)cvAlignPtr( dst_step + dst_count, (int)sizeof(void*) ); 787 dptr = sptr + pair_count; 788 sdelta0 = (int*)(dptr + pair_count); 789 sdelta1 = sdelta0 + pair_count; 790 ddelta0 = sdelta1 + pair_count; 791 ddelta1 = ddelta0 + pair_count; 792 793 src_cn[0] = dst_cn[0] = 0; 794 795 for( k = 0; k < 2; k++ ) 796 { 797 for( i = 0; i < (k == 0 ? src_count : dst_count); i++ ) 798 { 799 CvMat stub, *mat = (CvMat*)(k == 0 ? src[i] : dst[i]); 800 int cn; 801 802 if( !CV_IS_MAT(mat) ) 803 CV_CALL( mat = cvGetMat( mat, &stub )); 804 805 if( depth < 0 ) 806 { 807 depth = CV_MAT_DEPTH(mat->type); 808 elem_size = CV_ELEM_SIZE1(depth); 809 size = cvGetMatSize(mat); 810 } 811 812 if( CV_MAT_DEPTH(mat->type) != depth ) 813 CV_ERROR( CV_StsUnmatchedFormats, "All the arrays must have the same bit depth" ); 814 815 if( mat->cols != size.width || mat->rows != size.height ) 816 CV_ERROR( CV_StsUnmatchedSizes, "All the arrays must have the same size" ); 817 818 if( k == 0 ) 819 { 820 src0[i] = mat->data.ptr; 821 cn = CV_MAT_CN(mat->type); 822 src_cn[i+1] = src_cn[i] + cn; 823 src_step[i] = mat->step / elem_size - size.width * cn; 824 } 825 else 826 { 827 dst0[i] = mat->data.ptr; 828 cn = CV_MAT_CN(mat->type); 829 dst_cn[i+1] = dst_cn[i] + cn; 830 dst_step[i] = mat->step / elem_size - size.width * cn; 831 } 832 833 cont_flag &= mat->type; 834 } 835 } 836 837 if( cont_flag ) 838 { 839 size.width *= size.height; 840 size.height = 1; 841 } 842 843 for( i = 0; i < pair_count; i++ ) 844 { 845 for( k = 0; k < 2; k++ ) 846 { 847 int cn = from_to[i*2 + k]; 848 const int* cn_arr = k == 0 ? src_cn : dst_cn; 849 int a = 0, b = k == 0 ? src_count-1 : dst_count-1; 850 851 if( cn < 0 || cn >= cn_arr[b+1] ) 852 { 853 if( k == 0 && cn < 0 ) 854 { 855 sptr[i] = 0; 856 sdelta0[i] = sdelta1[i] = 0; 857 continue; 858 } 859 else 860 { 861 char err_str[100]; 862 sprintf( err_str, "channel index #%d in the array of pairs is negative " 863 "or exceeds the total number of channels in all the %s arrays", i*2+k, 864 k == 0 ? "input" : "output" ); 865 CV_ERROR( CV_StsOutOfRange, err_str ); 866 } 867 } 868 869 for( ; cn >= cn_arr[a+1]; a++ ) 870 ; 871 872 if( k == 0 ) 873 { 874 sptr[i] = src0[a] + (cn - cn_arr[a])*elem_size; 875 sdelta1[i] = cn_arr[a+1] - cn_arr[a]; 876 sdelta0[i] = src_step[a]; 877 } 878 else 879 { 880 dptr[i] = dst0[a] + (cn - cn_arr[a])*elem_size; 881 ddelta1[i] = cn_arr[a+1] - cn_arr[a]; 882 ddelta0[i] = dst_step[a]; 883 } 884 } 885 } 886 887 func = (CvMixChannelsFunc)mixcn_tab.fn_2d[depth]; 888 if( !func ) 889 CV_ERROR( CV_StsUnsupportedFormat, "The data type is not supported by the function" ); 890 891 IPPI_CALL( func( (const void**)sptr, sdelta0, sdelta1, (void**)dptr, 892 ddelta0, ddelta1, pair_count, size )); 893 894 __END__; 895 896 if( buffer && heap_alloc ) 897 cvFree( &buffer ); 898 } 899 900 901 /****************************************************************************************\ 902 * cvConvertScaleAbs * 903 \****************************************************************************************/ 904 905 #define ICV_DEF_CVT_SCALE_ABS_CASE( srctype, worktype, \ 906 scale_macro, abs_macro, cast_macro, a, b ) \ 907 \ 908 { \ 909 const srctype* _src = (const srctype*)src; \ 910 srcstep /= sizeof(_src[0]); /*dststep /= sizeof(_dst[0]);*/ \ 911 \ 912 for( ; size.height--; _src += srcstep, dst += dststep ) \ 913 { \ 914 int i; \ 915 \ 916 for( i = 0; i <= size.width - 4; i += 4 ) \ 917 { \ 918 worktype t0 = scale_macro((a)*_src[i] + (b)); \ 919 worktype t1 = scale_macro((a)*_src[i+1] + (b)); \ 920 \ 921 t0 = (worktype)abs_macro(t0); \ 922 t1 = (worktype)abs_macro(t1); \ 923 \ 924 dst[i] = cast_macro(t0); \ 925 dst[i+1] = cast_macro(t1); \ 926 \ 927 t0 = scale_macro((a)*_src[i+2] + (b)); \ 928 t1 = scale_macro((a)*_src[i+3] + (b)); \ 929 \ 930 t0 = (worktype)abs_macro(t0); \ 931 t1 = (worktype)abs_macro(t1); \ 932 \ 933 dst[i+2] = cast_macro(t0); \ 934 dst[i+3] = cast_macro(t1); \ 935 } \ 936 \ 937 for( ; i < size.width; i++ ) \ 938 { \ 939 worktype t0 = scale_macro((a)*_src[i] + (b)); \ 940 t0 = (worktype)abs_macro(t0); \ 941 dst[i] = cast_macro(t0); \ 942 } \ 943 } \ 944 } 945 946 947 #define ICV_FIX_SHIFT 15 948 #define ICV_SCALE(x) (((x) + (1 << (ICV_FIX_SHIFT-1))) >> ICV_FIX_SHIFT) 949 950 static CvStatus CV_STDCALL 951 icvCvtScaleAbsTo_8u_C1R( const uchar* src, int srcstep, 952 uchar* dst, int dststep, 953 CvSize size, double scale, double shift, 954 int param ) 955 { 956 int srctype = param; 957 int srcdepth = CV_MAT_DEPTH(srctype); 958 959 size.width *= CV_MAT_CN(srctype); 960 961 switch( srcdepth ) 962 { 963 case CV_8S: 964 case CV_8U: 965 { 966 uchar lut[256]; 967 int i; 968 double val = shift; 969 970 for( i = 0; i < 128; i++, val += scale ) 971 { 972 int t = cvRound(fabs(val)); 973 lut[i] = CV_CAST_8U(t); 974 } 975 976 if( srcdepth == CV_8S ) 977 val = -val; 978 979 for( ; i < 256; i++, val += scale ) 980 { 981 int t = cvRound(fabs(val)); 982 lut[i] = CV_CAST_8U(t); 983 } 984 985 icvLUT_Transform8u_8u_C1R( src, srcstep, dst, 986 dststep, size, lut ); 987 } 988 break; 989 case CV_16U: 990 if( fabs( scale ) <= 1. && fabs(shift) < DBL_EPSILON ) 991 { 992 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); 993 994 if( iscale == ICV_FIX_SHIFT ) 995 { 996 ICV_DEF_CVT_SCALE_ABS_CASE( ushort, int, CV_NOP, CV_IABS, 997 CV_CAST_8U, 1, 0 ); 998 } 999 else 1000 { 1001 ICV_DEF_CVT_SCALE_ABS_CASE( ushort, int, ICV_SCALE, CV_IABS, 1002 CV_CAST_8U, iscale, 0 ); 1003 } 1004 } 1005 else 1006 { 1007 ICV_DEF_CVT_SCALE_ABS_CASE( ushort, int, cvRound, CV_IABS, 1008 CV_CAST_8U, scale, shift ); 1009 } 1010 break; 1011 case CV_16S: 1012 if( fabs( scale ) <= 1. && 1013 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) 1014 { 1015 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); 1016 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); 1017 1018 if( iscale == ICV_FIX_SHIFT && ishift == 0 ) 1019 { 1020 ICV_DEF_CVT_SCALE_ABS_CASE( short, int, CV_NOP, CV_IABS, 1021 CV_CAST_8U, 1, 0 ); 1022 } 1023 else 1024 { 1025 ICV_DEF_CVT_SCALE_ABS_CASE( short, int, ICV_SCALE, CV_IABS, 1026 CV_CAST_8U, iscale, ishift ); 1027 } 1028 } 1029 else 1030 { 1031 ICV_DEF_CVT_SCALE_ABS_CASE( short, int, cvRound, CV_IABS, 1032 CV_CAST_8U, scale, shift ); 1033 } 1034 break; 1035 case CV_32S: 1036 ICV_DEF_CVT_SCALE_ABS_CASE( int, int, cvRound, CV_IABS, 1037 CV_CAST_8U, scale, shift ); 1038 break; 1039 case CV_32F: 1040 ICV_DEF_CVT_SCALE_ABS_CASE( float, int, cvRound, CV_IABS, 1041 CV_CAST_8U, scale, shift ); 1042 break; 1043 case CV_64F: 1044 ICV_DEF_CVT_SCALE_ABS_CASE( double, int, cvRound, CV_IABS, 1045 CV_CAST_8U, scale, shift ); 1046 break; 1047 default: 1048 assert(0); 1049 return CV_BADFLAG_ERR; 1050 } 1051 1052 return CV_OK; 1053 } 1054 1055 1056 CV_IMPL void 1057 cvConvertScaleAbs( const void* srcarr, void* dstarr, 1058 double scale, double shift ) 1059 { 1060 CV_FUNCNAME( "cvConvertScaleAbs" ); 1061 1062 __BEGIN__; 1063 1064 int coi1 = 0, coi2 = 0; 1065 CvMat srcstub, *src = (CvMat*)srcarr; 1066 CvMat dststub, *dst = (CvMat*)dstarr; 1067 CvSize size; 1068 int src_step, dst_step; 1069 1070 CV_CALL( src = cvGetMat( src, &srcstub, &coi1 )); 1071 CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 )); 1072 1073 if( coi1 != 0 || coi2 != 0 ) 1074 CV_ERROR( CV_BadCOI, "" ); 1075 1076 if( !CV_ARE_SIZES_EQ( src, dst )) 1077 CV_ERROR( CV_StsUnmatchedSizes, "" ); 1078 1079 if( !CV_ARE_CNS_EQ( src, dst )) 1080 CV_ERROR( CV_StsUnmatchedFormats, "" ); 1081 1082 if( CV_MAT_DEPTH( dst->type ) != CV_8U ) 1083 CV_ERROR( CV_StsUnsupportedFormat, "" ); 1084 1085 size = cvGetMatSize( src ); 1086 src_step = src->step; 1087 dst_step = dst->step; 1088 1089 if( CV_IS_MAT_CONT( src->type & dst->type )) 1090 { 1091 size.width *= size.height; 1092 src_step = dst_step = CV_STUB_STEP; 1093 size.height = 1; 1094 } 1095 1096 IPPI_CALL( icvCvtScaleAbsTo_8u_C1R( src->data.ptr, src_step, 1097 (uchar*)(dst->data.ptr), dst_step, 1098 size, scale, shift, CV_MAT_TYPE(src->type))); 1099 __END__; 1100 } 1101 1102 /****************************************************************************************\ 1103 * cvConvertScale * 1104 \****************************************************************************************/ 1105 1106 #define ICV_DEF_CVT_SCALE_CASE( srctype, worktype, \ 1107 scale_macro, cast_macro, a, b ) \ 1108 \ 1109 { \ 1110 const srctype* _src = (const srctype*)src; \ 1111 srcstep /= sizeof(_src[0]); \ 1112 \ 1113 for( ; size.height--; _src += srcstep, dst += dststep ) \ 1114 { \ 1115 for( i = 0; i <= size.width - 4; i += 4 ) \ 1116 { \ 1117 worktype t0 = scale_macro((a)*_src[i]+(b)); \ 1118 worktype t1 = scale_macro((a)*_src[i+1]+(b)); \ 1119 \ 1120 dst[i] = cast_macro(t0); \ 1121 dst[i+1] = cast_macro(t1); \ 1122 \ 1123 t0 = scale_macro((a)*_src[i+2] + (b)); \ 1124 t1 = scale_macro((a)*_src[i+3] + (b)); \ 1125 \ 1126 dst[i+2] = cast_macro(t0); \ 1127 dst[i+3] = cast_macro(t1); \ 1128 } \ 1129 \ 1130 for( ; i < size.width; i++ ) \ 1131 { \ 1132 worktype t0 = scale_macro((a)*_src[i] + (b)); \ 1133 dst[i] = cast_macro(t0); \ 1134 } \ 1135 } \ 1136 } 1137 1138 1139 #define ICV_DEF_CVT_SCALE_FUNC_INT( flavor, dsttype, cast_macro ) \ 1140 static CvStatus CV_STDCALL \ 1141 icvCvtScaleTo_##flavor##_C1R( const uchar* src, int srcstep, \ 1142 dsttype* dst, int dststep, CvSize size, \ 1143 double scale, double shift, int param ) \ 1144 { \ 1145 int i, srctype = param; \ 1146 dsttype lut[256]; \ 1147 dststep /= sizeof(dst[0]); \ 1148 \ 1149 switch( CV_MAT_DEPTH(srctype) ) \ 1150 { \ 1151 case CV_8U: \ 1152 if( size.width*size.height >= 256 ) \ 1153 { \ 1154 double val = shift; \ 1155 for( i = 0; i < 256; i++, val += scale ) \ 1156 { \ 1157 int t = cvRound(val); \ 1158 lut[i] = cast_macro(t); \ 1159 } \ 1160 \ 1161 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \ 1162 dststep*sizeof(dst[0]), size, lut ); \ 1163 } \ 1164 else if( fabs( scale ) <= 128. && \ 1165 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) \ 1166 { \ 1167 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \ 1168 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); \ 1169 \ 1170 ICV_DEF_CVT_SCALE_CASE( uchar, int, ICV_SCALE, \ 1171 cast_macro, iscale, ishift ); \ 1172 } \ 1173 else \ 1174 { \ 1175 ICV_DEF_CVT_SCALE_CASE( uchar, int, cvRound, \ 1176 cast_macro, scale, shift ); \ 1177 } \ 1178 break; \ 1179 case CV_8S: \ 1180 if( size.width*size.height >= 256 ) \ 1181 { \ 1182 for( i = 0; i < 256; i++ ) \ 1183 { \ 1184 int t = cvRound( (schar)i*scale + shift ); \ 1185 lut[i] = cast_macro(t); \ 1186 } \ 1187 \ 1188 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \ 1189 dststep*sizeof(dst[0]), size, lut ); \ 1190 } \ 1191 else if( fabs( scale ) <= 128. && \ 1192 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) \ 1193 { \ 1194 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \ 1195 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); \ 1196 \ 1197 ICV_DEF_CVT_SCALE_CASE( schar, int, ICV_SCALE, \ 1198 cast_macro, iscale, ishift ); \ 1199 } \ 1200 else \ 1201 { \ 1202 ICV_DEF_CVT_SCALE_CASE( schar, int, cvRound, \ 1203 cast_macro, scale, shift ); \ 1204 } \ 1205 break; \ 1206 case CV_16U: \ 1207 if( fabs( scale ) <= 1. && fabs(shift) < DBL_EPSILON ) \ 1208 { \ 1209 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \ 1210 \ 1211 ICV_DEF_CVT_SCALE_CASE( ushort, int, ICV_SCALE, \ 1212 cast_macro, iscale, 0 ); \ 1213 } \ 1214 else \ 1215 { \ 1216 ICV_DEF_CVT_SCALE_CASE( ushort, int, cvRound, \ 1217 cast_macro, scale, shift ); \ 1218 } \ 1219 break; \ 1220 case CV_16S: \ 1221 if( fabs( scale ) <= 1. && \ 1222 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) \ 1223 { \ 1224 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \ 1225 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); \ 1226 \ 1227 ICV_DEF_CVT_SCALE_CASE( short, int, ICV_SCALE, \ 1228 cast_macro, iscale, ishift ); \ 1229 } \ 1230 else \ 1231 { \ 1232 ICV_DEF_CVT_SCALE_CASE( short, int, cvRound, \ 1233 cast_macro, scale, shift ); \ 1234 } \ 1235 break; \ 1236 case CV_32S: \ 1237 ICV_DEF_CVT_SCALE_CASE( int, int, cvRound, \ 1238 cast_macro, scale, shift ); \ 1239 break; \ 1240 case CV_32F: \ 1241 ICV_DEF_CVT_SCALE_CASE( float, int, cvRound, \ 1242 cast_macro, scale, shift ); \ 1243 break; \ 1244 case CV_64F: \ 1245 ICV_DEF_CVT_SCALE_CASE( double, int, cvRound, \ 1246 cast_macro, scale, shift ); \ 1247 break; \ 1248 default: \ 1249 assert(0); \ 1250 return CV_BADFLAG_ERR; \ 1251 } \ 1252 \ 1253 return CV_OK; \ 1254 } 1255 1256 1257 #define ICV_DEF_CVT_SCALE_FUNC_FLT( flavor, dsttype, cast_macro ) \ 1258 static CvStatus CV_STDCALL \ 1259 icvCvtScaleTo_##flavor##_C1R( const uchar* src, int srcstep, \ 1260 dsttype* dst, int dststep, CvSize size, \ 1261 double scale, double shift, int param ) \ 1262 { \ 1263 int i, srctype = param; \ 1264 dsttype lut[256]; \ 1265 dststep /= sizeof(dst[0]); \ 1266 \ 1267 switch( CV_MAT_DEPTH(srctype) ) \ 1268 { \ 1269 case CV_8U: \ 1270 if( size.width*size.height >= 256 ) \ 1271 { \ 1272 double val = shift; \ 1273 for( i = 0; i < 256; i++, val += scale ) \ 1274 lut[i] = (dsttype)val; \ 1275 \ 1276 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \ 1277 dststep*sizeof(dst[0]), size, lut ); \ 1278 } \ 1279 else \ 1280 { \ 1281 ICV_DEF_CVT_SCALE_CASE( uchar, double, CV_NOP, \ 1282 cast_macro, scale, shift ); \ 1283 } \ 1284 break; \ 1285 case CV_8S: \ 1286 if( size.width*size.height >= 256 ) \ 1287 { \ 1288 for( i = 0; i < 256; i++ ) \ 1289 lut[i] = (dsttype)((schar)i*scale + shift); \ 1290 \ 1291 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \ 1292 dststep*sizeof(dst[0]), size, lut ); \ 1293 } \ 1294 else \ 1295 { \ 1296 ICV_DEF_CVT_SCALE_CASE( schar, double, CV_NOP, \ 1297 cast_macro, scale, shift ); \ 1298 } \ 1299 break; \ 1300 case CV_16U: \ 1301 ICV_DEF_CVT_SCALE_CASE( ushort, double, CV_NOP, \ 1302 cast_macro, scale, shift ); \ 1303 break; \ 1304 case CV_16S: \ 1305 ICV_DEF_CVT_SCALE_CASE( short, double, CV_NOP, \ 1306 cast_macro, scale, shift ); \ 1307 break; \ 1308 case CV_32S: \ 1309 ICV_DEF_CVT_SCALE_CASE( int, double, CV_NOP, \ 1310 cast_macro, scale, shift ); \ 1311 break; \ 1312 case CV_32F: \ 1313 ICV_DEF_CVT_SCALE_CASE( float, double, CV_NOP, \ 1314 cast_macro, scale, shift ); \ 1315 break; \ 1316 case CV_64F: \ 1317 ICV_DEF_CVT_SCALE_CASE( double, double, CV_NOP, \ 1318 cast_macro, scale, shift ); \ 1319 break; \ 1320 default: \ 1321 assert(0); \ 1322 return CV_BADFLAG_ERR; \ 1323 } \ 1324 \ 1325 return CV_OK; \ 1326 } 1327 1328 1329 ICV_DEF_CVT_SCALE_FUNC_INT( 8u, uchar, CV_CAST_8U ) 1330 ICV_DEF_CVT_SCALE_FUNC_INT( 8s, schar, CV_CAST_8S ) 1331 ICV_DEF_CVT_SCALE_FUNC_INT( 16s, short, CV_CAST_16S ) 1332 ICV_DEF_CVT_SCALE_FUNC_INT( 16u, ushort, CV_CAST_16U ) 1333 ICV_DEF_CVT_SCALE_FUNC_INT( 32s, int, CV_CAST_32S ) 1334 1335 ICV_DEF_CVT_SCALE_FUNC_FLT( 32f, float, CV_CAST_32F ) 1336 ICV_DEF_CVT_SCALE_FUNC_FLT( 64f, double, CV_CAST_64F ) 1337 1338 CV_DEF_INIT_FUNC_TAB_2D( CvtScaleTo, C1R ) 1339 1340 1341 /****************************************************************************************\ 1342 * Conversion w/o scaling macros * 1343 \****************************************************************************************/ 1344 1345 #define ICV_DEF_CVT_CASE_2D( srctype, worktype, \ 1346 cast_macro1, cast_macro2 ) \ 1347 { \ 1348 const srctype* _src = (const srctype*)src; \ 1349 srcstep /= sizeof(_src[0]); \ 1350 \ 1351 for( ; size.height--; _src += srcstep, dst += dststep ) \ 1352 { \ 1353 int i; \ 1354 \ 1355 for( i = 0; i <= size.width - 4; i += 4 ) \ 1356 { \ 1357 worktype t0 = cast_macro1(_src[i]); \ 1358 worktype t1 = cast_macro1(_src[i+1]); \ 1359 \ 1360 dst[i] = cast_macro2(t0); \ 1361 dst[i+1] = cast_macro2(t1); \ 1362 \ 1363 t0 = cast_macro1(_src[i+2]); \ 1364 t1 = cast_macro1(_src[i+3]); \ 1365 \ 1366 dst[i+2] = cast_macro2(t0); \ 1367 dst[i+3] = cast_macro2(t1); \ 1368 } \ 1369 \ 1370 for( ; i < size.width; i++ ) \ 1371 { \ 1372 worktype t0 = cast_macro1(_src[i]); \ 1373 dst[i] = cast_macro2(t0); \ 1374 } \ 1375 } \ 1376 } 1377 1378 1379 #define ICV_DEF_CVT_FUNC_2D( flavor, dsttype, worktype, cast_macro2, \ 1380 srcdepth1, srctype1, cast_macro11, \ 1381 srcdepth2, srctype2, cast_macro12, \ 1382 srcdepth3, srctype3, cast_macro13, \ 1383 srcdepth4, srctype4, cast_macro14, \ 1384 srcdepth5, srctype5, cast_macro15, \ 1385 srcdepth6, srctype6, cast_macro16 ) \ 1386 static CvStatus CV_STDCALL \ 1387 icvCvtTo_##flavor##_C1R( const uchar* src, int srcstep, \ 1388 dsttype* dst, int dststep, \ 1389 CvSize size, int param ) \ 1390 { \ 1391 int srctype = param; \ 1392 dststep /= sizeof(dst[0]); \ 1393 \ 1394 switch( CV_MAT_DEPTH(srctype) ) \ 1395 { \ 1396 case srcdepth1: \ 1397 ICV_DEF_CVT_CASE_2D( srctype1, worktype, \ 1398 cast_macro11, cast_macro2 ); \ 1399 break; \ 1400 case srcdepth2: \ 1401 ICV_DEF_CVT_CASE_2D( srctype2, worktype, \ 1402 cast_macro12, cast_macro2 ); \ 1403 break; \ 1404 case srcdepth3: \ 1405 ICV_DEF_CVT_CASE_2D( srctype3, worktype, \ 1406 cast_macro13, cast_macro2 ); \ 1407 break; \ 1408 case srcdepth4: \ 1409 ICV_DEF_CVT_CASE_2D( srctype4, worktype, \ 1410 cast_macro14, cast_macro2 ); \ 1411 break; \ 1412 case srcdepth5: \ 1413 ICV_DEF_CVT_CASE_2D( srctype5, worktype, \ 1414 cast_macro15, cast_macro2 ); \ 1415 break; \ 1416 case srcdepth6: \ 1417 ICV_DEF_CVT_CASE_2D( srctype6, worktype, \ 1418 cast_macro16, cast_macro2 ); \ 1419 break; \ 1420 } \ 1421 \ 1422 return CV_OK; \ 1423 } 1424 1425 1426 ICV_DEF_CVT_FUNC_2D( 8u, uchar, int, CV_CAST_8U, 1427 CV_8S, schar, CV_NOP, 1428 CV_16U, ushort, CV_NOP, 1429 CV_16S, short, CV_NOP, 1430 CV_32S, int, CV_NOP, 1431 CV_32F, float, cvRound, 1432 CV_64F, double, cvRound ) 1433 1434 ICV_DEF_CVT_FUNC_2D( 8s, schar, int, CV_CAST_8S, 1435 CV_8U, uchar, CV_NOP, 1436 CV_16U, ushort, CV_NOP, 1437 CV_16S, short, CV_NOP, 1438 CV_32S, int, CV_NOP, 1439 CV_32F, float, cvRound, 1440 CV_64F, double, cvRound ) 1441 1442 ICV_DEF_CVT_FUNC_2D( 16u, ushort, int, CV_CAST_16U, 1443 CV_8U, uchar, CV_NOP, 1444 CV_8S, schar, CV_NOP, 1445 CV_16S, short, CV_NOP, 1446 CV_32S, int, CV_NOP, 1447 CV_32F, float, cvRound, 1448 CV_64F, double, cvRound ) 1449 1450 ICV_DEF_CVT_FUNC_2D( 16s, short, int, CV_CAST_16S, 1451 CV_8U, uchar, CV_NOP, 1452 CV_8S, schar, CV_NOP, 1453 CV_16U, ushort, CV_NOP, 1454 CV_32S, int, CV_NOP, 1455 CV_32F, float, cvRound, 1456 CV_64F, double, cvRound ) 1457 1458 ICV_DEF_CVT_FUNC_2D( 32s, int, int, CV_NOP, 1459 CV_8U, uchar, CV_NOP, 1460 CV_8S, schar, CV_NOP, 1461 CV_16U, ushort, CV_NOP, 1462 CV_16S, short, CV_NOP, 1463 CV_32F, float, cvRound, 1464 CV_64F, double, cvRound ) 1465 1466 ICV_DEF_CVT_FUNC_2D( 32f, float, float, CV_NOP, 1467 CV_8U, uchar, CV_8TO32F, 1468 CV_8S, schar, CV_8TO32F, 1469 CV_16U, ushort, CV_NOP, 1470 CV_16S, short, CV_NOP, 1471 CV_32S, int, CV_CAST_32F, 1472 CV_64F, double, CV_CAST_32F ) 1473 1474 ICV_DEF_CVT_FUNC_2D( 64f, double, double, CV_NOP, 1475 CV_8U, uchar, CV_8TO32F, 1476 CV_8S, schar, CV_8TO32F, 1477 CV_16U, ushort, CV_NOP, 1478 CV_16S, short, CV_NOP, 1479 CV_32S, int, CV_NOP, 1480 CV_32F, float, CV_NOP ) 1481 1482 CV_DEF_INIT_FUNC_TAB_2D( CvtTo, C1R ) 1483 1484 1485 typedef CvStatus (CV_STDCALL *CvCvtFunc)( const void* src, int srcstep, 1486 void* dst, int dststep, CvSize size, 1487 int param ); 1488 1489 typedef CvStatus (CV_STDCALL *CvCvtScaleFunc)( const void* src, int srcstep, 1490 void* dst, int dststep, CvSize size, 1491 double scale, double shift, 1492 int param ); 1493 1494 CV_IMPL void 1495 cvConvertScale( const void* srcarr, void* dstarr, 1496 double scale, double shift ) 1497 { 1498 static CvFuncTable cvt_tab, cvtscale_tab; 1499 static int inittab = 0; 1500 1501 CV_FUNCNAME( "cvConvertScale" ); 1502 1503 __BEGIN__; 1504 1505 int type; 1506 int is_nd = 0; 1507 CvMat srcstub, *src = (CvMat*)srcarr; 1508 CvMat dststub, *dst = (CvMat*)dstarr; 1509 CvSize size; 1510 int src_step, dst_step; 1511 int no_scale = scale == 1 && shift == 0; 1512 1513 if( !CV_IS_MAT(src) ) 1514 { 1515 if( CV_IS_MATND(src) ) 1516 is_nd = 1; 1517 else 1518 { 1519 int coi = 0; 1520 CV_CALL( src = cvGetMat( src, &srcstub, &coi )); 1521 1522 if( coi != 0 ) 1523 CV_ERROR( CV_BadCOI, "" ); 1524 } 1525 } 1526 1527 if( !CV_IS_MAT(dst) ) 1528 { 1529 if( CV_IS_MATND(dst) ) 1530 is_nd = 1; 1531 else 1532 { 1533 int coi = 0; 1534 CV_CALL( dst = cvGetMat( dst, &dststub, &coi )); 1535 1536 if( coi != 0 ) 1537 CV_ERROR( CV_BadCOI, "" ); 1538 } 1539 } 1540 1541 if( is_nd ) 1542 { 1543 CvArr* arrs[] = { src, dst }; 1544 CvMatND stubs[2]; 1545 CvNArrayIterator iterator; 1546 int dsttype; 1547 1548 CV_CALL( cvInitNArrayIterator( 2, arrs, 0, stubs, &iterator, CV_NO_DEPTH_CHECK )); 1549 1550 type = iterator.hdr[0]->type; 1551 dsttype = iterator.hdr[1]->type; 1552 iterator.size.width *= CV_MAT_CN(type); 1553 1554 if( !inittab ) 1555 { 1556 icvInitCvtToC1RTable( &cvt_tab ); 1557 icvInitCvtScaleToC1RTable( &cvtscale_tab ); 1558 inittab = 1; 1559 } 1560 1561 if( no_scale ) 1562 { 1563 CvCvtFunc func = (CvCvtFunc)(cvt_tab.fn_2d[CV_MAT_DEPTH(dsttype)]); 1564 if( !func ) 1565 CV_ERROR( CV_StsUnsupportedFormat, "" ); 1566 1567 do 1568 { 1569 IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP, 1570 iterator.ptr[1], CV_STUB_STEP, 1571 iterator.size, type )); 1572 } 1573 while( cvNextNArraySlice( &iterator )); 1574 } 1575 else 1576 { 1577 CvCvtScaleFunc func = 1578 (CvCvtScaleFunc)(cvtscale_tab.fn_2d[CV_MAT_DEPTH(dsttype)]); 1579 if( !func ) 1580 CV_ERROR( CV_StsUnsupportedFormat, "" ); 1581 1582 do 1583 { 1584 IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP, 1585 iterator.ptr[1], CV_STUB_STEP, 1586 iterator.size, scale, shift, type )); 1587 } 1588 while( cvNextNArraySlice( &iterator )); 1589 } 1590 EXIT; 1591 } 1592 1593 if( no_scale && CV_ARE_TYPES_EQ( src, dst ) ) 1594 { 1595 if( src != dst ) 1596 cvCopy( src, dst ); 1597 EXIT; 1598 } 1599 1600 if( !CV_ARE_SIZES_EQ( src, dst )) 1601 CV_ERROR( CV_StsUnmatchedSizes, "" ); 1602 1603 size = cvGetMatSize( src ); 1604 type = CV_MAT_TYPE(src->type); 1605 src_step = src->step; 1606 dst_step = dst->step; 1607 1608 if( CV_IS_MAT_CONT( src->type & dst->type )) 1609 { 1610 size.width *= size.height; 1611 src_step = dst_step = CV_STUB_STEP; 1612 size.height = 1; 1613 } 1614 1615 size.width *= CV_MAT_CN( type ); 1616 1617 if( CV_ARE_TYPES_EQ( src, dst ) && size.height == 1 && 1618 size.width <= CV_MAX_INLINE_MAT_OP_SIZE ) 1619 { 1620 if( CV_MAT_DEPTH(type) == CV_32F ) 1621 { 1622 const float* srcdata = (const float*)(src->data.ptr); 1623 float* dstdata = (float*)(dst->data.ptr); 1624 1625 do 1626 { 1627 dstdata[size.width - 1] = (float)(srcdata[size.width-1]*scale + shift); 1628 } 1629 while( --size.width ); 1630 1631 EXIT; 1632 } 1633 1634 if( CV_MAT_DEPTH(type) == CV_64F ) 1635 { 1636 const double* srcdata = (const double*)(src->data.ptr); 1637 double* dstdata = (double*)(dst->data.ptr); 1638 1639 do 1640 { 1641 dstdata[size.width - 1] = srcdata[size.width-1]*scale + shift; 1642 } 1643 while( --size.width ); 1644 1645 EXIT; 1646 } 1647 } 1648 1649 if( !inittab ) 1650 { 1651 icvInitCvtToC1RTable( &cvt_tab ); 1652 icvInitCvtScaleToC1RTable( &cvtscale_tab ); 1653 inittab = 1; 1654 } 1655 1656 if( !CV_ARE_CNS_EQ( src, dst )) 1657 CV_ERROR( CV_StsUnmatchedFormats, "" ); 1658 1659 if( no_scale ) 1660 { 1661 CvCvtFunc func = (CvCvtFunc)(cvt_tab.fn_2d[CV_MAT_DEPTH(dst->type)]); 1662 1663 if( !func ) 1664 CV_ERROR( CV_StsUnsupportedFormat, "" ); 1665 1666 IPPI_CALL( func( src->data.ptr, src_step, 1667 dst->data.ptr, dst_step, size, type )); 1668 } 1669 else 1670 { 1671 CvCvtScaleFunc func = (CvCvtScaleFunc) 1672 (cvtscale_tab.fn_2d[CV_MAT_DEPTH(dst->type)]); 1673 1674 if( !func ) 1675 CV_ERROR( CV_StsUnsupportedFormat, "" ); 1676 1677 IPPI_CALL( func( src->data.ptr, src_step, 1678 dst->data.ptr, dst_step, size, 1679 scale, shift, type )); 1680 } 1681 1682 __END__; 1683 } 1684 1685 /********************* helper functions for converting 32f<->64f ************************/ 1686 1687 IPCVAPI_IMPL( CvStatus, icvCvt_32f64f, 1688 ( const float* src, double* dst, int len ), (src, dst, len) ) 1689 { 1690 int i; 1691 for( i = 0; i <= len - 4; i += 4 ) 1692 { 1693 double t0 = src[i]; 1694 double t1 = src[i+1]; 1695 1696 dst[i] = t0; 1697 dst[i+1] = t1; 1698 1699 t0 = src[i+2]; 1700 t1 = src[i+3]; 1701 1702 dst[i+2] = t0; 1703 dst[i+3] = t1; 1704 } 1705 1706 for( ; i < len; i++ ) 1707 dst[i] = src[i]; 1708 1709 return CV_OK; 1710 } 1711 1712 1713 IPCVAPI_IMPL( CvStatus, icvCvt_64f32f, 1714 ( const double* src, float* dst, int len ), (src, dst, len) ) 1715 { 1716 int i = 0; 1717 for( ; i <= len - 4; i += 4 ) 1718 { 1719 double t0 = src[i]; 1720 double t1 = src[i+1]; 1721 1722 dst[i] = (float)t0; 1723 dst[i+1] = (float)t1; 1724 1725 t0 = src[i+2]; 1726 t1 = src[i+3]; 1727 1728 dst[i+2] = (float)t0; 1729 dst[i+3] = (float)t1; 1730 } 1731 1732 for( ; i < len; i++ ) 1733 dst[i] = (float)src[i]; 1734 1735 return CV_OK; 1736 } 1737 1738 1739 CvStatus CV_STDCALL icvScale_32f( const float* src, float* dst, int len, float a, float b ) 1740 { 1741 int i; 1742 for( i = 0; i <= len - 4; i += 4 ) 1743 { 1744 double t0 = src[i]*a + b; 1745 double t1 = src[i+1]*a + b; 1746 1747 dst[i] = (float)t0; 1748 dst[i+1] = (float)t1; 1749 1750 t0 = src[i+2]*a + b; 1751 t1 = src[i+3]*a + b; 1752 1753 dst[i+2] = (float)t0; 1754 dst[i+3] = (float)t1; 1755 } 1756 1757 for( ; i < len; i++ ) 1758 dst[i] = (float)(src[i]*a + b); 1759 1760 return CV_OK; 1761 } 1762 1763 1764 CvStatus CV_STDCALL icvScale_64f( const double* src, double* dst, int len, double a, double b ) 1765 { 1766 int i; 1767 for( i = 0; i <= len - 4; i += 4 ) 1768 { 1769 double t0 = src[i]*a + b; 1770 double t1 = src[i+1]*a + b; 1771 1772 dst[i] = t0; 1773 dst[i+1] = t1; 1774 1775 t0 = src[i+2]*a + b; 1776 t1 = src[i+3]*a + b; 1777 1778 dst[i+2] = t0; 1779 dst[i+3] = t1; 1780 } 1781 1782 for( ; i < len; i++ ) 1783 dst[i] = src[i]*a + b; 1784 1785 return CV_OK; 1786 } 1787 1788 /* End of file. */ 1789