
Lines Matching defs:src2

60 worktype t0 = __op__((src1)[i], (src2)[i]); \
61 worktype t1 = __op__((src1)[i+1], (src2)[i+1]); \
66 t0 = __op__((src1)[i+2],(src2)[i+2]); \
67 t1 = __op__((src1)[i+3],(src2)[i+3]); \
75 worktype t0 = __op__((src1)[i],(src2)[i]); \
82 ( const type* src1, int step1, const type* src2, int step2, \
84 (src1, step1, src2, step2, dst, step, size) ) \
86 step1/=sizeof(src1[0]); step2/=sizeof(src2[0]); step/=sizeof(dst[0]); \
90 for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
92 worktype t0 = __op__((src1)[0],(src2)[0]); \
98 for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
111 ( const type* src1, int step1, const type* src2, int step2, \
113 (src1, step1, src2, step2, dst, step, size, 0) ) \
115 step1/=sizeof(src1[0]); step2/=sizeof(src2[0]); step/=sizeof(dst[0]); \
119 for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
121 worktype t0 = __op__((src1)[0],(src2)[0]); \
127 for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
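
The matches above (source lines 60-127) are evidently from OpenCV 1.x's cxcore arithmetic source: the body of a binary-arithmetic macro that first converts byte steps to element steps, then walks each row four elements at a time with a scalar tail, in both a continuous and a strided variant. A minimal standalone sketch of that pattern, assuming illustrative names (SketchSize, add_8u_sketch are not from the file) and omitting the saturating casts the real macro applies:

    typedef struct { int width, height; } SketchSize;  /* stand-in for CvSize */

    static void add_8u_sketch( const unsigned char* src1, int step1,
                               const unsigned char* src2, int step2,
                               unsigned char* dst, int step, SketchSize size )
    {
        /* byte steps -> element steps, as in the macro */
        step1 /= (int)sizeof(src1[0]); step2 /= (int)sizeof(src2[0]);
        step  /= (int)sizeof(dst[0]);
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
        {
            int i = 0;
            for( ; i <= size.width - 4; i += 4 )   /* 4x unrolled body */
            {
                int t0 = src1[i] + src2[i];
                int t1 = src1[i+1] + src2[i+1];
                dst[i] = (unsigned char)t0; dst[i+1] = (unsigned char)t1;
                t0 = src1[i+2] + src2[i+2];
                t1 = src1[i+3] + src2[i+3];
                dst[i+2] = (unsigned char)t0; dst[i+3] = (unsigned char)t1;
            }
            for( ; i < size.width; i++ )           /* scalar tail */
                dst[i] = (unsigned char)(src1[i] + src2[i]);
        }
    }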
286 CvMat srcstub1, srcstub2, *src1, *src2;
297 src2 = (CvMat*)srcarr2;
299 if( !CV_IS_MAT(src1) || !CV_IS_MAT(src2) || !CV_IS_MAT(dst))
301 if( CV_IS_MATND(src1) || CV_IS_MATND(src2) || CV_IS_MATND(dst))
303 CvArr* arrs[] = { src1, src2, dst };
360 CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
367 if( !CV_ARE_TYPES_EQ( src1, src2 ) || !CV_ARE_TYPES_EQ( src1, dst ))
370 if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ))
380 if( CV_IS_MAT_CONT( src1->type & src2->type & dst->type ))
389 const float* src2data = (const float*)(src2->data.ptr);
404 const double* src2data = (const double*)(src2->data.ptr);
436 cont_flag = CV_IS_MAT_CONT( src1->type & src2->type & dst->type & mask->type );
481 src2_step = src2->step;
501 src2->data.ptr + y*src2->step, src2_step,
505 src2->data.ptr + y*src2->step, src2_step,
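
Source lines 286-505 are the validation and dispatch body of one of the elementwise add/subtract entry points: argument checks, continuous fast paths for float/double data, and masked per-row dispatch are all visible above. A hedged usage sketch of the matching OpenCV 1.x C API call, assuming this body is cvAdd:

    #include <cxcore.h>   /* OpenCV 1.x header; path varies by install */

    static void add_example( void )
    {
        CvMat* a   = cvCreateMat( 4, 4, CV_32FC1 );
        CvMat* b   = cvCreateMat( 4, 4, CV_32FC1 );
        CvMat* sum = cvCreateMat( 4, 4, CV_32FC1 );
        cvSet( a, cvScalarAll(1), NULL );
        cvSet( b, cvScalarAll(2), NULL );
        cvAdd( a, b, sum, NULL );         /* NULL mask: all elements written */
        cvReleaseMat( &a ); cvReleaseMat( &b ); cvReleaseMat( &sum );
    }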
762 CvMat srcstub2, *src2 = (CvMat*)srcarr2;
771 if( !CV_IS_MAT(src1) || !CV_IS_MAT(src2) || !CV_IS_MAT(dst))
773 if( CV_IS_MATND(src1) || CV_IS_MATND(src2) || CV_IS_MATND(dst))
775 CvArr* arrs[] = { src1, src2, dst };
832 CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
839 if( !CV_ARE_TYPES_EQ( src1, src2 ) || !CV_ARE_TYPES_EQ( src1, dst ))
842 if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ))
852 if( CV_IS_MAT_CONT( src1->type & src2->type & dst->type ))
861 const float* src2data = (const float*)(src2->data.ptr);
876 const double* src2data = (const double*)(src2->data.ptr);
908 cont_flag = CV_IS_MAT_CONT( src1->type & src2->type & dst->type & mask->type );
953 src2_step = src2->step;
973 src2->data.ptr + y*src2->step, src2_step,
977 src2->data.ptr + y*src2->step, src2_step,
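
Source lines 762-977 repeat the same structure for the companion entry point of the add/sub pair; which body is cvAdd and which is cvSub is not recoverable from the fragments alone. A sketch of the masked variant of the same C API, shown here with cvSub:

    #include <cxcore.h>

    static void masked_sub_example( CvMat* a, CvMat* b, CvMat* diff )
    {
        /* mask must be 8-bit, same size; zero entries leave dst untouched */
        CvMat* mask = cvCreateMat( a->rows, a->cols, CV_8UC1 );
        cvSet( mask, cvScalarAll(255), NULL );
        cvSub( a, b, diff, mask );
        cvReleaseMat( &mask );
    }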
1222 const arrtype* src2, int step2, \
1226 step1 /= sizeof(src1[0]); step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); \
1230 for( ; size.height--; src1+=step1, src2+=step2, dst+=step ) \
1235 worktype t0 = src1[i] * src2[i]; \
1236 worktype t1 = src1[i+1] * src2[i+1]; \
1241 t0 = src1[i+2] * src2[i+2]; \
1242 t1 = src1[i+3] * src2[i+3]; \
1250 worktype t0 = src1[i] * src2[i]; \
1257 for( ; size.height--; src1+=step1, src2+=step2, dst+=step ) \
1262 double ft0 = scale*_cvt_macro_(src1[i])*_cvt_macro_(src2[i]); \
1263 double ft1 = scale*_cvt_macro_(src1[i+1])*_cvt_macro_(src2[i+1]); \
1270 ft0 = scale*_cvt_macro_(src1[i+2])*_cvt_macro_(src2[i+2]); \
1271 ft1 = scale*_cvt_macro_(src1[i+3])*_cvt_macro_(src2[i+3]); \
1282 t0 = _cast_macro1_(scale*_cvt_macro_(src1[i])*_cvt_macro_(src2[i])); \
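
Source lines 1222-1282 form the elementwise-multiply macro, with two paths: a widened integer product when scale == 1, and a double-precision path otherwise (the ft0/ft1 pairs above). An illustrative single-row version for 16-bit data, with plain casts standing in for the file's rounding and saturating cast macros:

    static void mul_16s_row_sketch( const short* src1, const short* src2,
                                    short* dst, int width, double scale )
    {
        int i;
        if( scale == 1 )
            for( i = 0; i < width; i++ )
            {
                int t0 = src1[i] * src2[i];   /* widened "worktype" product */
                dst[i] = (short)t0;           /* real code saturates here */
            }
        else
            for( i = 0; i < width; i++ )
            {
                double ft0 = scale * src1[i] * src2[i];
                dst[i] = (short)ft0;          /* real code rounds + saturates */
            }
    }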
1304 const void* src2, int step2,
1322 CvMat srcstub2, *src2 = (CvMat*)srcarr2;
1345 if( !CV_IS_MAT(src2) )
1347 if( CV_IS_MATND(src2) )
1351 CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi ));
1371 CvArr* arrs[] = { src1, src2, dst };
1395 if( !CV_ARE_TYPES_EQ( src1, src2 ) || !CV_ARE_TYPES_EQ( src1, dst ))
1398 if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ))
1407 if( CV_IS_MAT_CONT( src1->type & src2->type & dst->type ))
1416 const float* src2data = (const float*)(src2->data.ptr);
1432 const double* src2data = (const double*)(src2->data.ptr);
1452 src2_step = src2->step;
1461 IPPI_CALL( func( src1->data.ptr, src1_step, src2->data.ptr, src2_step,
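
Source lines 1304-1461 are the entry point built on that macro, presumably cvMul. A usage sketch of that C API; the scale factor here renormalizes a product of two 8-bit images back into 8-bit range:

    #include <cxcore.h>

    static void scaled_product_example( CvMat* a, CvMat* b, CvMat* product )
    {
        /* dst(i) = saturate( scale * a(i) * b(i) ) */
        cvMul( a, b, product, 1./255 );
    }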
1475 const arrtype* src2, int step2, \
1479 step1 /= sizeof(src1[0]); step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); \
1481 for( ; size.height--; src1+=step1, src2+=step2, dst+=step ) \
1483 _start_row_macro_(checktype, src2); \
1489 double a = (double)_cvt_macro_(src2[i]) * _cvt_macro_(src2[i+1]); \
1490 double b = (double)_cvt_macro_(src2[i+2]) * _cvt_macro_(src2[i+3]); \
1496 worktype z0 = _cast_macro1_(src2[i+1] * _cvt_macro_(src1[i]) * b); \
1497 worktype z1 = _cast_macro1_(src2[i] * _cvt_macro_(src1[i+1]) * b); \
1498 worktype z2 = _cast_macro1_(src2[i+3] * _cvt_macro_(src1[i+2]) * a); \
1499 worktype z3 = _cast_macro1_(src2[i+2] * _cvt_macro_(src1[i+3]) * a); \
1509 _cast_macro1_(_cvt_macro_(src1[i])*scale/_cvt_macro_(src2[i])) : 0; \
1511 _cast_macro1_(_cvt_macro_(src1[i+1])*scale/_cvt_macro_(src2[i+1])):0;\
1513 _cast_macro1_(_cvt_macro_(src1[i+2])*scale/_cvt_macro_(src2[i+2])):0;\
1515 _cast_macro1_(_cvt_macro_(src1[i+3])*scale/_cvt_macro_(src2[i+3])):0;\
1527 _cast_macro1_(_cvt_macro_(src1[i])*scale/_cvt_macro_(src2[i])) : 0; \
1616 cvRound, CV_CAST_8U, CV_8TO32F, CV_NONZERO, src2 )
1624 cvRound, CV_CAST_16U, CV_CAST_64F, CV_NONZERO, src2 )
1626 cvRound, CV_CAST_16S, CV_NOP, CV_NONZERO, src2 )
1628 cvRound, CV_CAST_32S, CV_CAST_64F, CV_NONZERO, src2 )
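
Source lines 1475-1527 implement elementwise division with a strength-reduction trick: one real division serves four quotients. With a = y0*y1 and b = y2*y3, d = scale/(a*b) gives b*d = scale/(y0*y1), so y1*x0*(b*d) == scale*x0/y0, which is exactly the z0..z3 pattern above; the CV_NONZERO instantiations (source lines 1616-1628) map zero divisors to 0. A float sketch of the four-quotient kernel (x and y point to at least four elements; zero checks omitted):

    static void div_quad_f32_sketch( const float* x, const float* y,
                                     float* dst, double scale )
    {
        double a = (double)y[0] * y[1];
        double b = (double)y[2] * y[3];
        double d = scale / (a * b);        /* the only real division */
        a *= d;                            /* a == scale/(y[2]*y[3]) */
        b *= d;                            /* b == scale/(y[0]*y[1]) */
        dst[0] = (float)(y[1] * x[0] * b); /* == scale*x[0]/y[0] */
        dst[1] = (float)(y[0] * x[1] * b);
        dst[2] = (float)(y[3] * x[2] * a);
        dst[3] = (float)(y[2] * x[3] * a);
    }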
1670 CvMat srcstub2, *src2 = (CvMat*)srcarr2;
1681 if( !CV_IS_MAT(src2) )
1683 if( CV_IS_MATND(src2))
1687 CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi ));
1706 if( !CV_ARE_TYPES_EQ( src1, src2 ))
1709 if( !CV_ARE_SIZES_EQ( src1, src2 ))
1729 CvArr* arrs[] = { dst, src2, src1 };
1772 if( !CV_ARE_TYPES_EQ( src2, dst ))
1775 if( !CV_ARE_SIZES_EQ( src2, dst ))
1778 type = CV_MAT_TYPE(src2->type);
1779 size = cvGetMatSize( src2 );
1782 if( CV_IS_MAT_CONT( src1_cont_flag & src2->type & dst->type ))
1791 src2_step = src2->step;
1802 IPPI_CALL( func( src1->data.ptr, src1_step, src2->data.ptr, src2_step,
1812 IPPI_CALL( func( src2->data.ptr, src2_step,
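
Source lines 1670-1812 are the division entry point, presumably cvDiv. The reordered array list at line 1729 and the second dispatch at line 1812 appear to correspond to the branch where src1 is NULL, in which case the documented C API behavior is to compute scale/src2 elementwise. Usage sketch:

    #include <cxcore.h>

    static void div_examples( CvMat* a, CvMat* b, CvMat* dst )
    {
        cvDiv( a, b, dst, 1 );       /* dst = a / b, elementwise */
        cvDiv( NULL, b, dst, 1 );    /* src1 == NULL: dst = scale / b */
    }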
1825 const arrtype* src2, int step2, double beta, \
1828 step1 /= sizeof(src1[0]); step2 /= sizeof(src2[0]); step /= sizeof(dst[0]); \
1830 for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
1837 load_macro((src2)[i])*beta + gamma); \
1839 load_macro((src2)[i+1])*beta + gamma); \
1845 load_macro((src2)[i+2])*beta + gamma); \
1847 load_macro((src2)[i+3])*beta + gamma); \
1856 load_macro((src2)[i])*beta + gamma); \
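
Source lines 1825-1856 are the generic weighted-sum macro: dst = src1*alpha + src2*beta + gamma, accumulated in double and cast back. An illustrative single-row float version:

    static void add_weighted_f32_row( const float* src1, double alpha,
                                      const float* src2, double beta,
                                      double gamma, float* dst, int width )
    {
        int i;
        for( i = 0; i < width; i++ )  /* dst = src1*alpha + src2*beta + gamma */
            dst[i] = (float)(src1[i]*alpha + src2[i]*beta + gamma);
    }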
1870 const uchar* src2, int step2, double beta,
1898 for( ; size.height--; src1 += step1, src2 += step2, dst += step )
1904 t0 = CV_FAST_CAST_8U((tab1[src1[i]] + tab2[src2[i]]) >> shift);
1905 t1 = CV_FAST_CAST_8U((tab1[src1[i+1]] + tab2[src2[i+1]]) >> shift);
1910 t0 = CV_FAST_CAST_8U((tab1[src1[i+2]] + tab2[src2[i+2]]) >> shift);
1911 t1 = CV_FAST_CAST_8U((tab1[src1[i+3]] + tab2[src2[i+3]]) >> shift);
1919 t0 = CV_FAST_CAST_8U((tab1[src1[i]] + tab2[src2[i]]) >> shift);
1927 for( ; size.height--; src1 += step1, src2 += step2, dst += step )
1933 t0 = (tab1[src1[i]] + tab2[src2[i]]) >> shift;
1934 t1 = (tab1[src1[i+1]] + tab2[src2[i+1]]) >> shift;
1939 t0 = (tab1[src1[i+2]] + tab2[src2[i+2]]) >> shift;
1940 t1 = (tab1[src1[i+3]] + tab2[src2[i+3]]) >> shift;
1948 t0 = (tab1[src1[i]] + tab2[src2[i]]) >> shift;
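
Source lines 1870-1948 are the 8-bit specialization: instead of two multiplies per pixel, two 256-entry fixed-point tables (tab1, tab2) are built once, so each pixel costs two lookups, an add, a shift, and (in the first loop) a saturating cast. A hedged sketch of that scheme; the exact table construction and rounding in the file may differ:

    #include <math.h>

    enum { SHIFT = 16 };   /* fixed-point fraction bits */

    static void add_weighted_8u_sketch( const unsigned char* src1, double alpha,
                                        const unsigned char* src2, double beta,
                                        double gamma, unsigned char* dst,
                                        int width )
    {
        /* assumes |alpha|, |beta| are modest so the tables fit in int;
           an arithmetic right shift is assumed, as in the fragments */
        int tab1[256], tab2[256], i;
        for( i = 0; i < 256; i++ )
        {
            tab1[i] = (int)floor( i*alpha*(1 << SHIFT) + 0.5 );
            /* fold gamma into the second table */
            tab2[i] = (int)floor( (i*beta + gamma)*(1 << SHIFT) + 0.5 );
        }
        for( i = 0; i < width; i++ )
        {
            int t = (tab1[src1[i]] + tab2[src2[i]]) >> SHIFT;
            dst[i] = (unsigned char)(t < 0 ? 0 : t > 255 ? 255 : t); /* saturate */
        }
    }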
1969 const void* src2, int step2, double beta,
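
Source line 1969 is the declaration of the type-dispatched worker behind the public weighted-sum entry point, cvAddWeighted. A usage sketch of that C API:

    #include <cxcore.h>

    static void blend_example( CvMat* a, CvMat* b, CvMat* dst )
    {
        /* dst = 0.7*a + 0.3*b; a simple cross-fade between two images */
        cvAddWeighted( a, 0.7, b, 0.3, 0.0, dst );
    }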