/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "_cv.h"

/****************************************************************************************\
                         Down-sampling pyramids core functions
\****************************************************************************************/

////////////  Filtering macros  /////////////

/* COMMON CASE */
/* 1/16[1    4    6    4    1]       */
/* ...| x0 | x1 | x2 | x3 | x4 |...  */
#define PD_FILTER( x0, x1, x2, x3, x4 )  ((x2)*6+((x1)+(x3))*4+(x0)+(x4))

/* MACROS FOR BORDERS */

/* | b I a | b | reflection used ("I" denotes the image boundary) */

/* LEFT/TOP */
/* 1/16[1    4    6    4    1]       */
/*     | x2 | x1 I x0 | x1 | x2 |... */
#define PD_LT( x0, x1, x2 )              ((x0)*6 + (x1)*8 + (x2)*2)

/* RIGHT/BOTTOM */
/* 1/16[1    4    6    4    1]       */
/* ...| x0 | x1 | x2 | x3 I x2 |     */
#define PD_RB( x0, x1, x2, x3 )          ((x0) + ((x1) + (x3))*4 + (x2)*7)

/* SINGULAR CASE ( width == 2 || height == 2 ) */
/* 1/16[1    4    6    4    1]       */
/*     | x0 | x1 I x0 | x1 I x0 |    */
#define PD_SINGULAR( x0, x1 )            (((x0) + (x1))*8)

#define PD_SCALE_INT(x)                  (((x) + (1<<7)) >> 8)
#define PD_SCALE_FLT(x)                  ((x)*0.00390625f)

#define PD_SZ  5
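/* A minimal illustration (not compiled into the library) of how the macros above fit
   together: PD_FILTER/PD_LT/PD_RB accumulate un-normalized 5-tap sums for one
   horizontal pass, and PD_SCALE_INT divides by 256 (the product of the horizontal
   and vertical 1/16 kernels) with rounding. The helper name and sample values are
   hypothetical. */
#if 0
static int icvPDFilterExample( void )
{
    int x0 = 10, x1 = 20, x2 = 30, x3 = 20, x4 = 10;
    int hsum = PD_FILTER( x0, x1, x2, x3, x4 );  /* 10 + 4*20 + 6*30 + 4*20 + 10 = 360 */
    /* if the five buffered rows happened to be identical, the vertical pass would
       multiply this sum by 16 (the kernel weight total), and the final pixel is: */
    return PD_SCALE_INT( hsum*16 );              /* (5760 + 128) >> 8 = 23 */
}
#endif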

//////////  generic macro  ////////////

#define ICV_DEF_PYR_DOWN_FUNC( flavor, type, worktype, _pd_scale_ ) \
static CvStatus CV_STDCALL \
icvPyrDownG5x5_##flavor##_CnR( const type* src, int srcstep, type* dst, \
                               int dststep, CvSize size, void *buf, int Cs ) \
{ \
    worktype* buffer = (worktype*)buf;  /* pointer to temporary buffer */ \
    worktype* rows[PD_SZ];              /* array of row pointers. dim(rows) is PD_SZ */ \
    int y, top_row = 0; \
    int Wd = size.width/2, Wdn = Wd*Cs; \
    int buffer_step = Wdn; \
    int pd_sz = (PD_SZ + 1)*buffer_step; \
    int fst = 0, lst = size.height <= PD_SZ/2 ? size.height : PD_SZ/2 + 1; \
    \
    assert( Cs == 1 || Cs == 3 ); \
    srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]); \
    \
    /* main loop */ \
    for( y = 0; y < size.height; y += 2, dst += dststep ) \
    { \
        /* set first and last indices of buffer rows which need to be filled */ \
        int x, y1, k = top_row; \
        int x1 = buffer_step; \
        worktype *row01, *row23, *row4; \
        \
        /* assign row pointers */ \
        for( y1 = 0; y1 < PD_SZ; y1++ ) \
        { \
            rows[y1] = buffer + k; \
            k += buffer_step; \
            k &= k < pd_sz ? -1 : 0; \
        } \
        \
        row01 = rows[0]; \
        row23 = rows[2]; \
        row4 = rows[4]; \
        \
        /* fill new buffer rows with filtered source (horizontal conv) */ \
        if( Cs == 1 ) \
        { \
            if( size.width > PD_SZ/2 ) \
                for( y1 = fst; y1 < lst; y1++, src += srcstep ) \
                { \
                    worktype *row = rows[y1]; \
                    \
                    /* process left & right bounds */ \
                    row[0] = PD_LT( src[0], src[1], src[2] ); \
                    row[Wd-1] = PD_RB( src[Wd*2-4], src[Wd*2-3], \
                                       src[Wd*2-2], src[Wd*2-1]); \
                    /* other points (even) */ \
                    for( x = 1; x < Wd - 1; x++ ) \
                    { \
                        row[x] = PD_FILTER( src[2*x-2], src[2*x-1], src[2*x], \
                                            src[2*x+1], src[2*x+2] ); \
                    } \
                } \
            else \
                for( y1 = fst; y1 < lst; y1++, src += srcstep ) \
                { \
                    rows[y1][0] = PD_SINGULAR( src[0], src[1] ); \
                } \
        } \
        else /* Cs == 3 */ \
        { \
            for( y1 = fst; y1 < lst; y1++, src += srcstep ) \
            { \
                worktype *row = rows[y1]; \
                \
                if( size.width > PD_SZ/2 ) \
                { \
                    int c; \
                    for( c = 0; c < 3; c++ ) \
                    { \
                        /* process left & right bounds */ \
                        row[c] = PD_LT( src[c], src[3+c], src[6+c] ); \
                        row[Wdn-3+c] = PD_RB( src[Wdn*2-12+c], src[Wdn*2-9+c], \
                                              src[Wdn*2-6+c], src[Wdn*2-3+c] ); \
                    } \
                    /* other points (even) */ \
                    for( x = 3; x < Wdn - 3; x += 3 ) \
                    { \
                        row[x]   = PD_FILTER( src[2*x-6], src[2*x-3], src[2*x], \
                                              src[2*x+3], src[2*x+6] ); \
                        row[x+1] = PD_FILTER( src[2*x-5], src[2*x-2], src[2*x+1], \
                                              src[2*x+4], src[2*x+7] ); \
                        row[x+2] = PD_FILTER( src[2*x-4], src[2*x-1], src[2*x+2], \
                                              src[2*x+5], src[2*x+8] ); \
                    } \
                } \
                else /* size.width <= PD_SZ/2 */ \
                { \
                    row[0] = PD_SINGULAR( src[0], src[3] ); \
                    row[1] = PD_SINGULAR( src[1], src[4] ); \
                    row[2] = PD_SINGULAR( src[2], src[5] ); \
                } \
            } \
        } \
        \
        /* second pass. Do vertical conv and write results to destination image */ \
        if( y > 0 ) \
        { \
            if( y < size.height - PD_SZ/2 ) \
            { \
                for( x = 0; x < Wdn; x++, x1++ ) \
                { \
                    dst[x] = (type)_pd_scale_( PD_FILTER( row01[x], row01[x1], \
                                               row23[x], row23[x1], row4[x] )); \
                } \
                top_row += 2*buffer_step; \
                top_row &= top_row < pd_sz ? -1 : 0; \
            } \
            else /* bottom */ \
                for( x = 0; x < Wdn; x++, x1++ ) \
                    dst[x] = (type)_pd_scale_( PD_RB( row01[x], row01[x1], \
                                                      row23[x], row23[x1])); \
        } \
        else \
        { \
            if( size.height > PD_SZ/2 ) /* top */ \
            { \
                for( x = 0; x < Wdn; x++, x1++ ) \
                    dst[x] = (type)_pd_scale_( PD_LT( row01[x], row01[x1], row23[x] )); \
            } \
            else /* size.height <= PD_SZ/2 */ \
            { \
                for( x = 0; x < Wdn; x++, x1++ ) \
                    dst[x] = (type)_pd_scale_( PD_SINGULAR( row01[x], row01[x1] )); \
            } \
            fst = PD_SZ - 2; \
        } \
        \
        lst = y + 2 + PD_SZ/2 < size.height ? PD_SZ : size.height - y; \
    } \
    \
    return CV_OK; \
}


ICV_DEF_PYR_DOWN_FUNC( 8u, uchar, int, PD_SCALE_INT )
ICV_DEF_PYR_DOWN_FUNC( 16s, short, int, PD_SCALE_INT )
ICV_DEF_PYR_DOWN_FUNC( 16u, ushort, int, PD_SCALE_INT )
ICV_DEF_PYR_DOWN_FUNC( 32f, float, float, PD_SCALE_FLT )
ICV_DEF_PYR_DOWN_FUNC( 64f, double, double, PD_SCALE_FLT )

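/* Each invocation above stamps out one concrete row-processing function. For example,
   the 8u instantiation generates a function with this prototype (shown here only as a
   sketch for reference; it is produced by the macro, not declared separately):

       static CvStatus CV_STDCALL
       icvPyrDownG5x5_8u_CnR( const uchar* src, int srcstep, uchar* dst,
                              int dststep, CvSize size, void* buf, int Cs );

   icvInitPyrDownG5x5Table() below gathers these per-depth functions into a CvFuncTable
   that cvPyrDown() indexes by CV_MAT_DEPTH. */
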
/****************************************************************************************\
                          Up-sampling pyramids core functions
\****************************************************************************************/

///////////  filtering macros  //////////////

/* COMMON CASE: NON ZERO */
/* 1/16[1    4    6    4    1]       */
/* ...| x0 | 0 | x1 | 0 | x2 |...    */
#define PU_FILTER( x0, x1, x2 )         ((x1)*6 + (x0) + (x2))

/* ZERO POINT AT CENTER */
/* 1/16[1    4    6    4    1]       */
/* ...| 0 | x0 | 0 | x1 | 0 |...     */
#define PU_FILTER_ZI( x0, x1 )          (((x0) + (x1))*4)

/* MACROS FOR BORDERS */

/* | b I a | b | reflection */

/* LEFT/TOP */
/* 1/16[1    4    6    4    1]       */
/*    | x1 | 0 I x0 | 0 | x1 |...    */
#define PU_LT( x0, x1 )                 ((x0)*6 + (x1)*2)

/* 1/16[1    4    6    4    1]       */
/*    | 0 I x0 | 0 | x1 | 0 |...     */
#define PU_LT_ZI( x0, x1 )              PU_FILTER_ZI((x0),(x1))

/* RIGHT/BOTTOM: NON ZERO */
/* 1/16[1    4    6    4    1]       */
/* ...| x0 | 0 | x1 | 0 I x1 |       */
#define PU_RB( x0, x1 )                 ((x0) + (x1)*7)

/* RIGHT/BOTTOM: ZERO POINT AT CENTER */
/* 1/16[1    4    6    4    1]       */
/* ...| 0 | x0 | 0 I x0 | 0 |        */
#define PU_RB_ZI( x0 )                  ((x0)*8)

/* SINGULAR CASE */
/* 1/16[1    4    6    4    1]       */
/*    | x0 | 0 I x0 | 0 I x0 |       */
#define PU_SINGULAR( x0 )               PU_RB_ZI((x0))   /* <--| the same formulas */
#define PU_SINGULAR_ZI( x0 )            PU_RB_ZI((x0))   /* <--|                   */

/* x/64 - scaling in up-sampling functions */
#define PU_SCALE_INT(x)                 (((x) + (1<<5)) >> 6)
#define PU_SCALE_FLT(x)                 ((x)*0.015625f)

#define PU_SZ  3
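/* A minimal illustration (not compiled into the library) of the up-sampling scheme:
   conceptually the source row is interleaved with zeros, so even destination pixels
   use the 1-6-1 taps (PU_FILTER) and odd ones the 4-4 taps (PU_FILTER_ZI); after the
   vertical pass the total weight is 64, hence PU_SCALE_*. The helper name and sample
   values are hypothetical. */
#if 0
static int icvPUFilterExample( void )
{
    int x0 = 10, x1 = 20, x2 = 30;
    int even = PU_FILTER( x0, x1, x2 );     /* 10 + 6*20 + 30 = 160 */
    int odd  = PU_FILTER_ZI( x1, x2 );      /* 4*(20 + 30)   = 200 */
    /* with identical buffered rows the vertical pass multiplies each sum by 8 */
    return PU_SCALE_INT( even*8 ) + PU_SCALE_INT( odd*8 );  /* 20 + 25 */
}
#endif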

////////////  generic macro  /////////////


#define ICV_DEF_PYR_UP_FUNC( flavor, type, worktype, _pu_scale_ ) \
static CvStatus CV_STDCALL \
icvPyrUpG5x5_##flavor##_CnR( const type* src, int srcstep, type* dst, \
                             int dststep, CvSize size, void *buf, int Cs ) \
{ \
    worktype *buffer = (worktype*)buf; \
    worktype *rows[PU_SZ]; \
    int y, top_row = 0; \
    int Wd = size.width * 2, Wdn = Wd * Cs, Wn = size.width * Cs; \
    int buffer_step = Wdn; \
    int pu_sz = PU_SZ*buffer_step; \
    int fst = 0, lst = size.height <= PU_SZ/2 ? size.height : PU_SZ/2 + 1; \
    \
    assert( Cs == 1 || Cs == 3 ); \
    srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]); \
    \
    /* main loop */ \
    for( y = 0; y < size.height; y++, dst += 2 * dststep ) \
    { \
        int x, y1, k = top_row; \
        worktype *row0, *row1, *row2; \
        type *dst1; \
        \
        /* assign row pointers */ \
        for( y1 = 0; y1 < PU_SZ; y1++ ) \
        { \
            rows[y1] = buffer + k; \
            k += buffer_step; \
            k &= k < pu_sz ? -1 : 0; \
        } \
        \
        row0 = rows[0]; \
        row1 = rows[1]; \
        row2 = rows[2]; \
        dst1 = dst + dststep; \
        \
        /* fill new buffer rows with filtered source (horizontal conv) */ \
        if( Cs == 1 ) \
            if( size.width > PU_SZ / 2 ) \
                for( y1 = fst; y1 < lst; y1++, src += srcstep ) \
                { \
                    worktype *row = rows[y1]; \
                    \
                    /* process left & right bounds */ \
                    row[0] = PU_LT( src[0], src[1] ); \
                    row[1] = PU_LT_ZI( src[0], src[1] ); \
                    row[size.width * 2 - 2] = PU_RB( src[size.width - 2], \
                                                     src[size.width - 1] ); \
                    row[size.width * 2 - 1] = PU_RB_ZI( src[size.width - 1] ); \
                    /* other points */ \
                    for( x = 1; x < size.width - 1; x++ ) \
                    { \
                        row[2 * x] = PU_FILTER( src[x - 1], src[x], src[x + 1] ); \
                        row[2 * x + 1] = PU_FILTER_ZI( src[x], src[x + 1] ); \
                    } \
                } \
            else /* size.width <= PU_SZ/2 */ \
                for( y1 = fst; y1 < lst; y1++, src += srcstep ) \
                { \
                    worktype *row = rows[y1]; \
                    worktype val = src[0]; \
                    \
                    row[0] = PU_SINGULAR( val ); \
                    row[1] = PU_SINGULAR_ZI( val ); \
                } \
        else /* Cs == 3 */ \
            for( y1 = fst; y1 < lst; y1++, src += srcstep ) \
            { \
                worktype *row = rows[y1]; \
                \
                if( size.width > PU_SZ / 2 ) \
                { \
                    int c; \
                    \
                    for( c = 0; c < 3; c++ ) \
                    { \
                        /* process left & right bounds */ \
                        row[c] = PU_LT( src[c], src[3 + c] ); \
                        row[3 + c] = PU_LT_ZI( src[c], src[3 + c] ); \
                        row[Wn * 2 - 6 + c] = PU_RB( src[Wn - 6 + c], src[Wn - 3 + c]); \
                        row[Wn * 2 - 3 + c] = PU_RB_ZI( src[Wn - 3 + c] ); \
                    } \
                    /* other points */ \
                    for( x = 3; x < Wn - 3; x += 3 ) \
                    { \
                        row[2 * x] = PU_FILTER( src[x - 3], src[x], src[x + 3] ); \
                        row[2 * x + 3] = PU_FILTER_ZI( src[x], src[x + 3] ); \
                        \
                        row[2 * x + 1] = PU_FILTER( src[x - 2], src[x + 1], src[x + 4]); \
                        row[2 * x + 4] = PU_FILTER_ZI( src[x + 1], src[x + 4] ); \
                        \
                        row[2 * x + 2] = PU_FILTER( src[x - 1], src[x + 2], src[x + 5]); \
                        row[2 * x + 5] = PU_FILTER_ZI( src[x + 2], src[x + 5] ); \
                    } \
                } \
                else /* size.width <= PU_SZ/2 */ \
                { \
                    int c; \
                    \
                    for( c = 0; c < 3; c++ ) \
                    { \
                        row[c] = PU_SINGULAR( src[c] ); \
                        row[3 + c] = PU_SINGULAR_ZI( src[c] ); \
                    } \
                } \
            } \
        \
        /* second pass. Do vertical conv and write results to destination image */ \
        if( y > 0 ) \
        { \
            if( y < size.height - PU_SZ / 2 ) \
            { \
                for( x = 0; x < Wdn; x++ ) \
                { \
                    dst[x] = (type)_pu_scale_( PU_FILTER( row0[x], row1[x], row2[x] )); \
                    dst1[x] = (type)_pu_scale_( PU_FILTER_ZI( row1[x], row2[x] )); \
                } \
                top_row += buffer_step; \
                top_row &= top_row < pu_sz ? -1 : 0; \
            } \
            else /* bottom */ \
                for( x = 0; x < Wdn; x++ ) \
                { \
                    dst[x] = (type)_pu_scale_( PU_RB( row0[x], row1[x] )); \
                    dst1[x] = (type)_pu_scale_( PU_RB_ZI( row1[x] )); \
                } \
        } \
        else \
        { \
            if( size.height > PU_SZ / 2 ) /* top */ \
                for( x = 0; x < Wdn; x++ ) \
                { \
                    dst[x] = (type)_pu_scale_( PU_LT( row0[x], row1[x] )); \
                    dst1[x] = (type)_pu_scale_( PU_LT_ZI( row0[x], row1[x] )); \
                } \
            else /* size.height <= PU_SZ/2 */ \
                for( x = 0; x < Wdn; x++ ) \
                { \
                    dst[x] = (type)_pu_scale_( PU_SINGULAR( row0[x] )); \
                    dst1[x] = (type)_pu_scale_( PU_SINGULAR_ZI( row0[x] )); \
                } \
            fst = PU_SZ - 1; \
        } \
        \
        lst = y < size.height - PU_SZ/2 - 1 ? PU_SZ : size.height + PU_SZ/2 - y - 1; \
    } \
    \
    return CV_OK; \
}


ICV_DEF_PYR_UP_FUNC( 8u, uchar, int, PU_SCALE_INT )
ICV_DEF_PYR_UP_FUNC( 16s, short, int, PU_SCALE_INT )
ICV_DEF_PYR_UP_FUNC( 16u, ushort, int, PU_SCALE_INT )
ICV_DEF_PYR_UP_FUNC( 32f, float, float, PU_SCALE_FLT )
ICV_DEF_PYR_UP_FUNC( 64f, double, double, PU_SCALE_FLT )


static CvStatus CV_STDCALL
icvPyrUpG5x5_GetBufSize( int roiWidth, CvDataType dataType,
                         int channels, int *bufSize )
{
    int bufStep;

    if( !bufSize )
        return CV_NULLPTR_ERR;
    *bufSize = 0;

    if( roiWidth < 0 )
        return CV_BADSIZE_ERR;
    if( channels != 1 && channels != 3 )
        return CV_UNSUPPORTED_CHANNELS_ERR;

    bufStep = 2*roiWidth*channels;

    if( dataType == cv64f )
        bufStep *= sizeof(double);
    else
        bufStep *= sizeof(int);

    *bufSize = bufStep * PU_SZ;
    return CV_OK;
}


static CvStatus CV_STDCALL
icvPyrDownG5x5_GetBufSize( int roiWidth, CvDataType dataType,
                           int channels, int *bufSize )
{
    int bufStep;

    if( !bufSize )
        return CV_NULLPTR_ERR;
    *bufSize = 0;

    if( roiWidth < 0 || (roiWidth & 1) != 0 )
        return CV_BADSIZE_ERR;
    if( channels != 1 && channels != 3 )
        return CV_UNSUPPORTED_CHANNELS_ERR;

    bufStep = 2*roiWidth*channels;

    if( dataType == cv64f )
        bufStep *= sizeof(double);
    else
        bufStep *= sizeof(int);

    *bufSize = bufStep * (PD_SZ + 1);
    return CV_OK;
}

/****************************************************************************************\
                           Downsampled image border completion
\****************************************************************************************/

#define ICV_DEF_PYR_BORDER_FUNC( flavor, arrtype, worktype, _pd_scale_ ) \
static CvStatus CV_STDCALL \
icvPyrDownBorder_##flavor##_CnR( const arrtype *src, int src_step, CvSize src_size, \
                                 arrtype *dst, int dst_step, CvSize dst_size, int channels ) \
{ \
    int local_alloc = 0; \
    worktype *buf = 0, *buf0 = 0; \
    const arrtype* src2; \
    arrtype* dst2; \
    int buf_size; \
    int i, j; \
    int W = src_size.width, H = src_size.height; \
    int Wd = dst_size.width, Hd = dst_size.height; \
    int Wd_, Hd_; \
    int Wn = W*channels; \
    int bufW; \
    int cols, rows; /* columns and rows to modify */ \
    \
    assert( channels == 1 || channels == 3 ); \
    \
    buf_size = MAX(src_size.width,src_size.height) * sizeof(buf[0]) * 2 * channels; \
    if( buf_size > (1 << 14) ) \
    { \
        buf = (worktype*)cvAlloc( buf_size ); \
        if( !buf ) \
            return CV_OUTOFMEM_ERR; \
    } \
    else \
    { \
        buf = (worktype*)cvAlignPtr(alloca( buf_size+8 ), 8); \
        local_alloc = 1; \
    } \
    \
    buf0 = buf; \
    \
    src_step /= sizeof(src[0]); \
    dst_step /= sizeof(dst[0]); \
    \
    cols = (W & 1) + (Wd*2 > W); \
    rows = (H & 1) + (Hd*2 > H); \
    \
    src2 = src + (H-1)*src_step; \
    dst2 = dst + (Hd - rows)*dst_step; \
    src += (W - 1)*channels; \
    dst += (Wd - cols)*channels; \
    \
    /* part of row(column) from 1 to Wd_(Hd_) is processed using PD_FILTER macro */ \
    Wd_ = Wd - 1 + (cols == 1 && (W & 1) != 0); \
    Hd_ = Hd - 1 + (rows == 1 && (H & 1) != 0); \
    \
    bufW = channels * cols; \
    \
    /*******************  STAGE 1.  ******************/ \
    \
    /* do horizontal convolution of the 1-2 right columns and write results to buffer */ \
    if( cols > 0 ) \
    { \
        if( W <= 2 ) \
        { \
            assert( Wd == 1 ); \
            for( i = 0; i < H; i++, src += src_step, buf += channels ) \
            { \
                if( channels == 1 ) \
                    buf[0] = PD_SINGULAR( src[1-Wn], src[0] ); \
                else \
                { \
                    buf[0] = PD_SINGULAR( src[3-Wn], src[0] ); \
                    buf[1] = PD_SINGULAR( src[4-Wn], src[1] ); \
                    buf[2] = PD_SINGULAR( src[5-Wn], src[2] ); \
                } \
            } \
        } \
        else if( (W == 3 && Wd == 1) || (W > 3 && !(Wd & 1)) ) \
        { \
            for( i = 0; i < H; i++, src += src_step, buf += channels ) \
            { \
                if( channels == 1 ) \
                    buf[0] = PD_LT( src[-2], src[-1], src[0] ); \
                else \
                { \
                    buf[0] = PD_LT( src[-6], src[-3], src[0] ); \
                    buf[1] = PD_LT( src[-5], src[-2], src[1] ); \
                    buf[2] = PD_LT( src[-4], src[-1], src[2] ); \
                } \
            } \
        } \
        else if( W == 3 ) \
        { \
            for( i = 0; i < H; i++, src += src_step, buf += channels*2 ) \
            { \
                if( channels == 1 ) \
                { \
                    buf[0] = PD_LT( src[-2], src[-1], src[0] ); \
                    buf[1] = PD_LT( src[0], src[-1], src[-2] ); \
                } \
                else \
                { \
                    buf[0] = PD_LT( src[-6], src[-3], src[0] ); \
                    buf[1] = PD_LT( src[-5], src[-2], src[1] ); \
                    buf[2] = PD_LT( src[-4], src[-1], src[2] ); \
                    buf[3] = PD_LT( src[0], src[-3], src[-6] ); \
                    buf[4] = PD_LT( src[1], src[-2], src[-5] ); \
                    buf[5] = PD_LT( src[2], src[-1], src[-4] ); \
                } \
            } \
        } \
        else if( cols == 1 ) \
        { \
            for( i = 0; i < H; i++, src += src_step, buf += channels ) \
            { \
                if( channels == 1 ) \
                    buf[0] = PD_FILTER( src[-4], src[-3], src[-2], src[-1], src[0]); \
                else \
                { \
                    buf[0] = PD_FILTER( src[-12], src[-9], src[-6], src[-3], src[0]); \
                    buf[1] = PD_FILTER( src[-11], src[-8], src[-5], src[-2], src[1]); \
                    buf[2] = PD_FILTER( src[-10], src[-7], src[-4], src[-1], src[2]); \
                } \
            } \
        } \
        else \
        { \
            for( i = 0; i < H; i++, src += src_step, buf += channels*2 ) \
            { \
                if( channels == 1 ) \
                { \
                    buf[0] = PD_FILTER( src[-4], src[-3], src[-2], src[-1], src[0] ); \
                    buf[1] = PD_LT( src[0], src[-1], src[-2] ); \
                } \
                else \
                { \
                    buf[0] = PD_FILTER( src[-12], src[-9], src[-6], src[-3], src[0] ); \
                    buf[1] = PD_FILTER( src[-11], src[-8], src[-5], src[-2], src[1] ); \
                    buf[2] = PD_FILTER( src[-10], src[-7], src[-4], src[-1], src[2] ); \
                    buf[3] = PD_LT( src[0], src[-3], src[-6] ); \
                    buf[4] = PD_LT( src[1], src[-2], src[-5] ); \
                    buf[5] = PD_LT( src[2], src[-1], src[-4] ); \
                } \
            } \
        } \
        buf = buf0; \
    } \
    \
    src = src2; \
    \
    /*******************  STAGE 2.  ******************/ \
    \
    /* do vertical convolution of the pre-processed right columns, */ \
    /* stored in buffer, and write results to the destination      */ \
    /* do vertical convolution of the 1-2 bottom rows              */ \
    /* and write results to the buffer                             */ \
    if( H <= 2 ) \
    { \
        if( cols > 0 ) \
        { \
            assert( Hd == 1 ); \
            for( j = 0; j < bufW; j++ ) \
                dst[j] = (arrtype)_pd_scale_( PD_SINGULAR( buf[j], buf[j+(H-1)*bufW] )); \
        } \
        \
        if( rows > 0 ) \
        { \
            for( j = 0; j < Wn; j++ ) \
                buf[j] = PD_SINGULAR( src[j-src_step], src[j] ); \
        } \
    } \
    else if( H == 3 ) \
    { \
        if( cols > 0 ) \
        { \
            for( j = 0; j < bufW; j++ ) \
            { \
                dst[j] = (arrtype)_pd_scale_(PD_LT( buf[j], buf[j+bufW], buf[j+bufW*2])); \
            } \
            if( Hd == 2 ) \
            { \
                dst += dst_step; \
                for( j = 0; j < bufW; j++ ) \
                    dst[j] = (arrtype)_pd_scale_( PD_LT( buf[j+bufW*2], \
                                                         buf[j+bufW], buf[j] )); \
            } \
        } \
        \
        if( Hd == 1 ) \
        { \
            for( j = 0; j < Wn; j++ ) \
                buf[j] = PD_LT( src[j-src_step*2], src[j - src_step], src[j] ); \
        } \
        else \
        { \
            for( j = 0; j < Wn; j++ ) \
            { \
                buf[j] = PD_LT( src[j-src_step*2], src[j - src_step], src[j] ); \
                buf[j+Wn] = PD_LT( src[j],src[j-src_step],src[j-src_step*2] ); \
            } \
        } \
    } \
    else \
    { \
        if( cols > 0 ) \
        { \
            /* top of the right border */ \
            for( j = 0; j < bufW; j++ ) \
                dst[j] = (arrtype)_pd_scale_( PD_LT( buf[j], buf[j+bufW], buf[j+bufW*2])); \
            \
            /* middle part of the right border */ \
            buf += bufW*2; \
            dst += dst_step; \
            for( i = 1; i < Hd_; i++, dst += dst_step, buf += bufW*2 ) \
            { \
                for( j = 0; j < bufW; j++ ) \
                    dst[j] = (arrtype)_pd_scale_( PD_FILTER( buf[j-bufW*2], buf[j-bufW], \
                                                  buf[j], buf[j+bufW], buf[j+bufW*2] )); \
            } \
            \
            /* bottom of the right border */ \
            if( !(H & 1) ) \
            { \
                for( j = 0; j < bufW; j++ ) \
                    dst[j] = (arrtype)_pd_scale_( PD_RB( buf[j-bufW*2], buf[j-bufW], \
                                                         buf[j], buf[j+bufW] )); \
            } \
            else if( rows > 1 ) \
            { \
                for( j = 0; j < bufW; j++ ) \
                    dst[j] = (arrtype)_pd_scale_( PD_LT( buf[j-bufW*2], \
                                                         buf[j-bufW], buf[j])); \
            } \
            \
            buf = buf0; \
        } \
        \
        if( rows > 0 ) \
        { \
            if( !(H & 1) ) \
            { \
                for( j = 0; j < Wn; j++ ) \
                    buf[j] = PD_LT( src[j], src[j-src_step], src[j-src_step*2] ); \
            } \
            else if( cols == 1 ) \
            { \
                for( j = 0; j < Wn; j++ ) \
                    buf[j] = PD_FILTER( src[j-src_step*4], src[j-src_step*3], \
                                        src[j-src_step*2], src[j-src_step], src[j] ); \
            } \
            else \
            { \
                for( j = 0; j < Wn; j++ ) \
                { \
                    buf[j] = PD_FILTER( src[j-src_step*4], src[j-src_step*3], \
                                        src[j-src_step*2], src[j-src_step], src[j] ); \
                    buf[j+Wn] = PD_LT( src[j], src[j-src_step], src[j-src_step*2] ); \
                } \
            } \
        } \
    } \
    \
    \
    /*******************  STAGE 3.  ******************/ \
    \
    /* do horizontal convolution of the pre-processed bottom rows, */ \
    /* stored in buffer, and write results to the destination      */ \
    if( rows > 0 ) \
    { \
        dst = dst2; \
        \
        if( W <= 2 ) \
        { \
            assert( Wd == 1 ); \
            for( ; rows--; dst += dst_step, buf += Wn ) \
            { \
                if( channels == 1 ) \
                    dst[0] = (arrtype)_pd_scale_( PD_SINGULAR( buf[0], buf[Wn-1] )); \
                else \
                { \
                    dst[0] = (arrtype)_pd_scale_( PD_SINGULAR( buf[0], buf[Wn-3] )); \
                    dst[1] = (arrtype)_pd_scale_( PD_SINGULAR( buf[1], buf[Wn-2] )); \
                    dst[2] = (arrtype)_pd_scale_( PD_SINGULAR( buf[2], buf[Wn-1] )); \
                } \
            } \
        } \
        else if( W == 3 ) \
        { \
            if( Wd == 1 ) \
            { \
                for( ; rows--; dst += dst_step, buf += Wn ) \
                { \
                    if( channels == 1 ) \
                        dst[0] = (arrtype)_pd_scale_( PD_LT(buf[0], buf[1], buf[2] )); \
                    else \
                    { \
                        dst[0] = (arrtype)_pd_scale_( PD_LT(buf[0], buf[3], buf[6] )); \
                        dst[1] = (arrtype)_pd_scale_( PD_LT(buf[1], buf[4], buf[7] )); \
                        dst[2] = (arrtype)_pd_scale_( PD_LT(buf[2], buf[5], buf[8] )); \
                    } \
                } \
            } \
            else \
            { \
                for( ; rows--; dst += dst_step, buf += Wn ) \
                { \
                    if( channels == 1 ) \
                    { \
                        dst[0] = (arrtype)_pd_scale_( PD_LT(buf[0], buf[1], buf[2] )); \
                        dst[1] = (arrtype)_pd_scale_( PD_LT(buf[2], buf[1], buf[0] )); \
                    } \
                    else \
                    { \
                        dst[0] = (arrtype)_pd_scale_( PD_LT(buf[0], buf[3], buf[6] )); \
                        dst[1] = (arrtype)_pd_scale_( PD_LT(buf[1], buf[4], buf[7] )); \
                        dst[2] = (arrtype)_pd_scale_( PD_LT(buf[2], buf[5], buf[8] )); \
                        dst[3] = (arrtype)_pd_scale_( PD_LT(buf[6], buf[3], buf[0] )); \
                        dst[4] = (arrtype)_pd_scale_( PD_LT(buf[7], buf[4], buf[1] )); \
                        dst[5] = (arrtype)_pd_scale_( PD_LT(buf[8], buf[5], buf[2] )); \
                    } \
                } \
            } \
        } \
        else \
        { \
            for( ; rows--; dst += dst_step, buf += Wn ) \
            { \
                if( channels == 1 ) \
                { \
                    /* left part of the bottom row */ \
                    dst[0] = (arrtype)_pd_scale_( PD_LT( buf[0], buf[1], buf[2] )); \
                    \
                    /* middle part of the bottom row */ \
                    for( i = 1; i < Wd_; i++ ) \
                    { \
                        dst[i] = (arrtype)_pd_scale_( PD_FILTER(buf[i*2-2], buf[i*2-1], \
                                                      buf[i*2], buf[i*2+1], buf[i*2+2] )); \
                    } \
                    \
                    /* right part of the bottom row */ \
                    if( !(W & 1) ) \
                        dst[i] = (arrtype)_pd_scale_( PD_RB( buf[i*2-2], buf[i*2-1], \
                                                             buf[i*2], buf[i*2+1] )); \
                    else if( cols > 1 ) \
                        dst[i] = (arrtype)_pd_scale_( PD_LT( buf[i*2-2], \
                                                             buf[i*2-1], buf[i*2] )); \
                } \
                else \
                { \
                    /* left part of the bottom row */ \
                    dst[0] = (arrtype)_pd_scale_( PD_LT( buf[0], buf[3], buf[6] )); \
                    dst[1] = (arrtype)_pd_scale_( PD_LT( buf[1], buf[4], buf[7] )); \
                    dst[2] = (arrtype)_pd_scale_( PD_LT( buf[2], buf[5], buf[8] )); \
                    \
                    /* middle part of the bottom row */ \
                    for( i = 3; i < Wd_*3; i++ ) \
                    { \
                        dst[i] = (arrtype)_pd_scale_( PD_FILTER(buf[i*2-6], buf[i*2-3], \
                                                      buf[i*2], buf[i*2+3], buf[i*2+6])); \
                    } \
                    \
                    /* right part of the bottom row */ \
                    if( !(W & 1) ) \
                    { \
                        dst[i] = (arrtype)_pd_scale_( PD_RB( buf[i*2-6], buf[i*2-3], \
                                                             buf[i*2], buf[i*2+3] )); \
                        dst[i+1] = (arrtype)_pd_scale_( PD_RB( buf[i*2-5], buf[i*2-2], \
                                                               buf[i*2+1], buf[i*2+4] )); \
                        dst[i+2] = (arrtype)_pd_scale_( PD_RB( buf[i*2-4], buf[i*2-1], \
                                                               buf[i*2+2], buf[i*2+5] )); \
                    } \
                    else if( cols > 1 ) \
                    { \
                        dst[i] = (arrtype)_pd_scale_( PD_LT( buf[i*2-6], buf[i*2-3], buf[i*2] )); \
                        dst[i+1] = (arrtype)_pd_scale_( PD_LT( buf[i*2-5], buf[i*2-2], buf[i*2+1])); \
                        dst[i+2] = (arrtype)_pd_scale_( PD_LT( buf[i*2-4], buf[i*2-1], buf[i*2+2])); \
                    } \
                } \
            } \
        } \
    } \
    \
    if( !local_alloc ) \
        cvFree( &buf0 ); \
    \
    return CV_OK; \
}


#define ICV_DEF_INIT_PYR_TABLE( FUNCNAME ) \
static void icvInit##FUNCNAME##Table( CvFuncTable* tab ) \
{ \
    tab->fn_2d[CV_8U] = (void*)icv##FUNCNAME##_8u_CnR; \
    tab->fn_2d[CV_8S] = 0; \
    tab->fn_2d[CV_16S] = (void*)icv##FUNCNAME##_16s_CnR; \
    tab->fn_2d[CV_16U] = (void*)icv##FUNCNAME##_16u_CnR; \
    tab->fn_2d[CV_32F] = (void*)icv##FUNCNAME##_32f_CnR; \
    tab->fn_2d[CV_64F] = (void*)icv##FUNCNAME##_64f_CnR; \
}

static void icvInitPyrDownBorderTable( CvFuncTable* tab );

ICV_DEF_INIT_PYR_TABLE( PyrUpG5x5 )
ICV_DEF_INIT_PYR_TABLE( PyrDownG5x5 )

typedef CvStatus (CV_STDCALL * CvPyrDownBorderFunc)( const void* src, int srcstep,
                                                     CvSize srcsize, void* dst,
                                                     int dststep, CvSize dstsize, int cn );

//////////////////////////////  IPP pyramid functions  /////////////////////////////////////

icvPyrDown_Gauss5x5_8u_C1R_t icvPyrDown_Gauss5x5_8u_C1R_p = 0;
icvPyrDown_Gauss5x5_8u_C3R_t icvPyrDown_Gauss5x5_8u_C3R_p = 0;
icvPyrDown_Gauss5x5_32f_C1R_t icvPyrDown_Gauss5x5_32f_C1R_p = 0;
icvPyrDown_Gauss5x5_32f_C3R_t icvPyrDown_Gauss5x5_32f_C3R_p = 0;

icvPyrUp_Gauss5x5_8u_C1R_t icvPyrUp_Gauss5x5_8u_C1R_p = 0;
icvPyrUp_Gauss5x5_8u_C3R_t icvPyrUp_Gauss5x5_8u_C3R_p = 0;
icvPyrUp_Gauss5x5_32f_C1R_t icvPyrUp_Gauss5x5_32f_C1R_p = 0;
icvPyrUp_Gauss5x5_32f_C3R_t icvPyrUp_Gauss5x5_32f_C3R_p = 0;

icvPyrUpGetBufSize_Gauss5x5_t icvPyrUpGetBufSize_Gauss5x5_p = 0;
icvPyrDownGetBufSize_Gauss5x5_t icvPyrDownGetBufSize_Gauss5x5_p = 0;

typedef CvStatus (CV_STDCALL * CvPyramidFunc)
                    ( const void* src, int srcstep, void* dst,
                      int dststep, CvSize size, void* buffer, int cn );

typedef CvStatus (CV_STDCALL * CvPyramidIPPFunc)
    ( const void* src, int srcstep, void* dst, int dststep, CvSize size, void* buffer );

//////////////////////////////////////////////////////////////////////////////////////////

/****************************************************************************************\
*                                  External functions                                   *
\****************************************************************************************/

CV_IMPL void
cvPyrUp( const void* srcarr, void* dstarr, int _filter )
{
    static CvFuncTable pyrup_tab;
    static int inittab = 0;

    void *buffer = 0;
    int local_alloc = 0;

    CV_FUNCNAME( "cvPyrUp" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0;
    int buffer_size = 0;
    int type, depth, cn;
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvFilter filter = (CvFilter) _filter;
    CvPyramidFunc func;
    CvPyramidIPPFunc ipp_func = 0;
    int use_ipp = 0;
    CvSize size;

    if( !inittab )
    {
        icvInitPyrUpG5x5Table( &pyrup_tab );
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( filter != CV_GAUSSIAN_5x5 )
        CV_ERROR( CV_StsBadArg, "this filter type is not supported" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( src->cols*2 != dst->cols || src->rows*2 != dst->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    size = cvGetMatSize(src);
    type = CV_MAT_TYPE(src->type);
    depth = CV_MAT_DEPTH(type);
    cn = CV_MAT_CN(type);

    if( cn != 1 && cn != 3 )
        CV_ERROR( CV_StsUnsupportedFormat, "The images must have 1 or 3 channels" );

    func = (CvPyramidFunc)pyrup_tab.fn_2d[depth];

    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( icvPyrUpGetBufSize_Gauss5x5_p )
    {
        ipp_func = type == CV_8UC1 ? icvPyrUp_Gauss5x5_8u_C1R_p :
                   type == CV_8UC3 ? icvPyrUp_Gauss5x5_8u_C3R_p :
                   type == CV_32FC1 ? icvPyrUp_Gauss5x5_32f_C1R_p :
                   type == CV_32FC3 ? icvPyrUp_Gauss5x5_32f_C3R_p : 0;

        use_ipp = ipp_func && icvPyrUpGetBufSize_Gauss5x5_p( size.width,
                            icvDepthToDataType(type), cn, &buffer_size ) >= 0;
    }

    if( !use_ipp )
        icvPyrUpG5x5_GetBufSize( size.width, icvDepthToDataType(type), cn, &buffer_size );

    if( buffer_size <= CV_MAX_LOCAL_SIZE )
    {
        buffer = cvStackAlloc( buffer_size );
        local_alloc = 1;
    }
    else
        CV_CALL( buffer = cvAlloc( buffer_size ));

    if( !use_ipp )
        func( src->data.ptr, src->step, dst->data.ptr, dst->step, size, buffer, cn );
    else
        IPPI_CALL( ipp_func( src->data.ptr, src->step ? src->step : CV_STUB_STEP,
                   dst->data.ptr, dst->step ? dst->step : CV_STUB_STEP, size, buffer ));

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );
}
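
/* A minimal usage sketch for cvPyrUp (not compiled into the library; the image and
   variable names are hypothetical). The destination must be exactly twice the source
   size, of the same type, and have 1 or 3 channels. */
#if 0
static void pyr_up_example( const IplImage* small_img )
{
    IplImage* big_img = cvCreateImage( cvSize( small_img->width*2, small_img->height*2 ),
                                       small_img->depth, small_img->nChannels );
    cvPyrUp( small_img, big_img, CV_GAUSSIAN_5x5 );
    /* ... use big_img ... */
    cvReleaseImage( &big_img );
}
#endif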


CV_IMPL void
cvPyrDown( const void* srcarr, void* dstarr, int _filter )
{
    static CvFuncTable pyrdown_tab;
    static CvFuncTable pyrdownborder_tab;
    static int inittab = 0;

    void *buffer = 0;
    int local_alloc = 0;

    CV_FUNCNAME( "cvPyrDown" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0;
    int buffer_size = 0;
    int type, depth, cn;
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvFilter filter = (CvFilter) _filter;
    CvPyramidFunc func;
    CvPyramidIPPFunc ipp_func = 0;
    int use_ipp = 0;
    CvSize src_size, src_size2, dst_size;

    if( !inittab )
    {
        icvInitPyrDownG5x5Table( &pyrdown_tab );
        icvInitPyrDownBorderTable( &pyrdownborder_tab );
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( filter != CV_GAUSSIAN_5x5 )
        CV_ERROR( CV_StsBadArg, "this filter type is not supported" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    src_size = cvGetMatSize(src);
    dst_size = cvGetMatSize(dst);
    src_size2.width = src_size.width & -2;
    src_size2.height = src_size.height & -2;

    if( (unsigned)(dst_size.width - src_size.width/2) > 1 ||
        (unsigned)(dst_size.height - src_size.height/2) > 1 )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    // current restriction of PyrDownBorder*
    if( (src_size.width <= 2 && dst_size.width != 1) ||
        (src_size.height <= 2 && dst_size.height != 1) )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    /*if( src->data.ptr == dst->data.ptr )
        CV_ERROR( CV_StsInplaceNotSupported, "" );*/

    type = CV_MAT_TYPE(src->type);
    depth = CV_MAT_DEPTH(type);
    cn = CV_MAT_CN(type);

    if( cn != 1 && cn != 3 )
        CV_ERROR( CV_StsUnsupportedFormat, "The images must have 1 or 3 channels" );

    func = (CvPyramidFunc)pyrdown_tab.fn_2d[depth];

    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( icvPyrDownGetBufSize_Gauss5x5_p )
    {
        ipp_func = type == CV_8UC1 ? icvPyrDown_Gauss5x5_8u_C1R_p :
                   type == CV_8UC3 ? icvPyrDown_Gauss5x5_8u_C3R_p :
                   type == CV_32FC1 ? icvPyrDown_Gauss5x5_32f_C1R_p :
                   type == CV_32FC3 ? icvPyrDown_Gauss5x5_32f_C3R_p : 0;

        use_ipp = ipp_func && icvPyrDownGetBufSize_Gauss5x5_p( src_size2.width,
                            icvDepthToDataType(type), cn, &buffer_size ) >= 0;
    }

    if( !use_ipp )
        icvPyrDownG5x5_GetBufSize( src_size2.width,
                            icvDepthToDataType(type), cn, &buffer_size );

    if( buffer_size <= CV_MAX_LOCAL_SIZE )
    {
        buffer = cvStackAlloc( buffer_size );
        local_alloc = 1;
    }
    else
        CV_CALL( buffer = cvAlloc( buffer_size ));

    if( !use_ipp )
        func( src->data.ptr, src->step, dst->data.ptr,
              dst->step, src_size2, buffer, cn );
    else
        IPPI_CALL( ipp_func( src->data.ptr, src->step ? src->step : CV_STUB_STEP,
            dst->data.ptr, dst->step ? dst->step : CV_STUB_STEP, src_size2, buffer ));

    if( src_size.width != dst_size.width*2 || src_size.height != dst_size.height*2 )
    {
        CvPyrDownBorderFunc border_func = (CvPyrDownBorderFunc)
            pyrdownborder_tab.fn_2d[CV_MAT_DEPTH(type)];

        if( !border_func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( border_func( src->data.ptr, src->step, src_size,
                                dst->data.ptr, dst->step, dst_size, CV_MAT_CN(type) ));
    }

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );
}
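
/* A minimal usage sketch for cvPyrDown (not compiled into the library; names are
   hypothetical). The destination is half the source size; for odd source dimensions
   it may be rounded up by one pixel, in which case the extra column/row is completed
   by the PyrDownBorder functions instantiated at the end of this file. */
#if 0
static void pyr_down_example( const IplImage* img )
{
    IplImage* half = cvCreateImage( cvSize( (img->width + 1)/2, (img->height + 1)/2 ),
                                    img->depth, img->nChannels );
    cvPyrDown( img, half, CV_GAUSSIAN_5x5 );
    /* ... use half ... */
    cvReleaseImage( &half );
}
#endif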


CV_IMPL void
cvReleasePyramid( CvMat*** _pyramid, int extra_layers )
{
    CV_FUNCNAME( "cvReleasePyramid" );

    __BEGIN__;

    CvMat** pyramid;
    int i;

    if( !_pyramid )
        CV_ERROR( CV_StsNullPtr, "" );

    pyramid = *_pyramid;

    if( pyramid )
    {
        for( i = 0; i <= extra_layers; i++ )
            cvReleaseMat( &pyramid[i] );
    }

    cvFree( _pyramid );

    __END__;
}


CV_IMPL CvMat**
cvCreatePyramid( const CvArr* srcarr, int extra_layers, double rate,
                 const CvSize* layer_sizes, CvArr* bufarr,
                 int calc, int filter )
{
    CvMat** pyramid = 0;
    const float eps = 0.1f;

    CV_FUNCNAME( "cvCreatePyramid" );

    __BEGIN__;

    int i, elem_size, layer_step;
    CvMat stub, *src;
    CvSize size, layer_size;
    uchar* ptr = 0;

    CV_CALL( src = cvGetMat( srcarr, &stub ));

    if( extra_layers < 0 )
        CV_ERROR( CV_StsOutOfRange, "The number of extra layers must be non-negative" );

    elem_size = CV_ELEM_SIZE(src->type);
    size = cvGetMatSize(src);

    if( bufarr )
    {
        CvMat bstub, *buf;
        int bufsize = 0;

        CV_CALL( buf = cvGetMat( bufarr, &bstub ));
        bufsize = buf->rows*buf->cols*CV_ELEM_SIZE(buf->type);
        layer_size = size;
        for( i = 1; i <= extra_layers; i++ )
        {
            if( !layer_sizes )
            {
                layer_size.width = cvRound(layer_size.width*rate+eps);
                layer_size.height = cvRound(layer_size.height*rate+eps);
            }
            else
                layer_size = layer_sizes[i-1];
            layer_step = layer_size.width*elem_size;
            bufsize -= layer_step*layer_size.height;
        }

        if( bufsize < 0 )
            CV_ERROR( CV_StsOutOfRange, "The buffer is too small to fit the pyramid" );
        ptr = buf->data.ptr;
    }

    CV_CALL( pyramid = (CvMat**)cvAlloc( (extra_layers+1)*sizeof(pyramid[0]) ));
    memset( pyramid, 0, (extra_layers+1)*sizeof(pyramid[0]) );

    pyramid[0] = cvCreateMatHeader( size.height, size.width, src->type );
    cvSetData( pyramid[0], src->data.ptr, src->step );
    layer_size = size;

    for( i = 1; i <= extra_layers; i++ )
    {
        if( !layer_sizes )
        {
            layer_size.width = cvRound(layer_size.width*rate + eps);
            layer_size.height = cvRound(layer_size.height*rate + eps);
        }
        else
            layer_size = layer_sizes[i];

        if( bufarr )
        {
            pyramid[i] = cvCreateMatHeader( layer_size.height, layer_size.width, src->type );
            layer_step = layer_size.width*elem_size;
            cvSetData( pyramid[i], ptr, layer_step );
            ptr += layer_step*layer_size.height;
        }
        else
            pyramid[i] = cvCreateMat( layer_size.height, layer_size.width, src->type );

        if( calc )
            cvPyrDown( pyramid[i-1], pyramid[i], filter );
            //cvResize( pyramid[i-1], pyramid[i], CV_INTER_LINEAR );
    }

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleasePyramid( &pyramid, extra_layers );

    return pyramid;
}
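
/* A minimal usage sketch for cvCreatePyramid/cvReleasePyramid (not compiled into the
   library; names are hypothetical): build the original image plus 3 extra layers, each
   half the size of the previous one, letting cvPyrDown fill them in (calc = 1). */
#if 0
static void pyramid_example( const IplImage* img )
{
    CvMat** pyr = cvCreatePyramid( img, 3, 0.5, 0, 0, 1, CV_GAUSSIAN_5x5 );
    if( pyr )
    {
        /* pyr[0] shares the data of img; pyr[1]..pyr[3] are the downsampled layers */
        cvReleasePyramid( &pyr, 3 );
    }
}
#endif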


/* MSVC .NET 2003 spends a long time building this file; since the code below is not
   performance-critical, we turn off the optimization here */
#if defined _MSC_VER && _MSC_VER > 1300 && !defined CV_ICC
#pragma optimize("", off)
#endif

ICV_DEF_PYR_BORDER_FUNC( 8u, uchar, int, PD_SCALE_INT )
ICV_DEF_PYR_BORDER_FUNC( 16u, ushort, int, PD_SCALE_INT )
ICV_DEF_PYR_BORDER_FUNC( 16s, short, int, PD_SCALE_INT )
ICV_DEF_PYR_BORDER_FUNC( 32f, float, float, PD_SCALE_FLT )
ICV_DEF_PYR_BORDER_FUNC( 64f, double, double, PD_SCALE_FLT )

#define ICV_DEF_INIT_PYR_BORDER_TABLE( FUNCNAME ) \
static void icvInit##FUNCNAME##Table( CvFuncTable* tab ) \
{ \
    tab->fn_2d[CV_8U] = (void*)icv##FUNCNAME##_8u_CnR; \
    tab->fn_2d[CV_8S] = 0; \
    tab->fn_2d[CV_16U] = (void*)icv##FUNCNAME##_16u_CnR; \
    tab->fn_2d[CV_16S] = (void*)icv##FUNCNAME##_16s_CnR; \
    tab->fn_2d[CV_32F] = (void*)icv##FUNCNAME##_32f_CnR; \
    tab->fn_2d[CV_64F] = (void*)icv##FUNCNAME##_64f_CnR; \
}

ICV_DEF_INIT_PYR_BORDER_TABLE( PyrDownBorder )

/* End of file. */