
Lines Matching defs:q0

277 #define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) {                      \
280   const __m128i t2 = MM_ABS(q1, q0);                                           \
284   const __m128i t4 = _mm_subs_epu8(t2, h);  /* abs(q1 - q0) - hev_thresh */    \
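For orientation, a scalar model of the high-edge-variance (hev) test that
GET_NOTHEV vectorizes across 16 pixels at once; the helper name is
illustrative, not part of the file:

    #include <stdlib.h>
    /* A pixel position is "not hev" when both local gradients stay at or
     * below hev_thresh. The SSE2 code gets the same answer via saturating
     * unsigned subtraction: abs(x) - hev_thresh clamps to zero exactly
     * when abs(x) <= hev_thresh, so OR-ing and comparing to zero works. */
    static int NotHev(int p1, int p0, int q0, int q1, int hev_thresh) {
      return abs(p1 - p0) <= hev_thresh && abs(q1 - q0) <= hev_thresh;
    }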
290 #define GET_BASE_DELTA(p1, p0, q0, q1, o) {                                    \
291   const __m128i qp0 = _mm_subs_epi8(q0, p0);  /* q0 - p0 */                    \
293   o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 1 * (q0 - p0) */          \
294   o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 2 * (q0 - p0) */          \
295   o = _mm_adds_epi8(o, qp0);            /* p1 - q1 + 3 * (q0 - p0) */          \
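A scalar sketch of GET_BASE_DELTA; clamp_s8 models the signed saturation of
_mm_subs_epi8/_mm_adds_epi8 and is an illustrative name:

    /* Saturate to the signed 8-bit range, as the epi8 intrinsics do. */
    static int clamp_s8(int v) { return v < -128 ? -128 : (v > 127 ? 127 : v); }

    /* o = (p1 - q1) + 3 * (q0 - p0), saturating after every step. */
    static int BaseDelta(int p1, int p0, int q0, int q1) {
      const int qp0 = clamp_s8(q0 - p0);
      int o = clamp_s8(p1 - q1);
      o = clamp_s8(o + qp0);  /* p1 - q1 + 1 * (q0 - p0) */
      o = clamp_s8(o + qp0);  /* p1 - q1 + 2 * (q0 - p0) */
      o = clamp_s8(o + qp0);  /* p1 - q1 + 3 * (q0 - p0) */
      return o;
    }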
298 #define DO_SIMPLE_FILTER(p0, q0, fl) {                                         \
306   q0 = _mm_subs_epi8(q0, v4);           /* q0 -= v4 */                         \
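A scalar sketch of DO_SIMPLE_FILTER, assuming the standard VP8 simple-filter
rule (the p0 side is elided by the match) and reusing clamp_s8 from the
sketch above:

    /* q0 moves by (fl + 4) >> 3 and p0 by (fl + 3) >> 3; the shifts are
     * arithmetic, which is what SIGNED_SHIFT_N implements for packed bytes. */
    static void SimpleFilter2(int fl, int* p0, int* q0) {
      const int v4 = clamp_s8(fl + 4) >> 3;
      const int v3 = clamp_s8(fl + 3) >> 3;
      *q0 = clamp_s8(*q0 - v4);  /* q0 -= v4 */
      *p0 = clamp_s8(*p0 + v3);  /* p0 += v3 */
    }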
324 static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
331 *mask = MM_ABS(*p0, *q0); // abs(p0 - q0)
332 *mask = _mm_adds_epu8(*mask, *mask); // abs(p0 - q0) * 2
333 *mask = _mm_adds_epu8(*mask, t1); // abs(p0 - q0) * 2 + abs(p1 - q1) / 2
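The complete per-pixel condition, as a scalar model (illustrative helper;
abs is from <stdlib.h> as in the first sketch):

    /* Filter the edge when abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= thresh. */
    static int NeedsFilterScalar(int p1, int p0, int q0, int q1, int thresh) {
      return 2 * abs(p0 - q0) + abs(p1 - q1) / 2 <= thresh;
    }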
343 // Applies filter on 2 pixels (p0 and q0)
344 static inline void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0,
351 NeedsFilter(p1, p0, q0, q1, thresh, &mask);
354 FLIP_SIGN_BIT2(*p0, *q0);
356 GET_BASE_DELTA(p1s, *p0, *q0, q1s, a);
358 DO_SIMPLE_FILTER(*p0, *q0, a);
361 FLIP_SIGN_BIT2(*p0, *q0);
364 // Applies filter on 4 pixels (p1, p0, q0 and q1)
365 static inline void DoFilter4(__m128i* p1, __m128i *p0, __m128i* q0, __m128i* q1,
372 GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);
375 FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
379 t2 = _mm_subs_epi8(*q0, *p0); // q0 - p0
380 t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 1 * (q0 - p0)
381 t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 2 * (q0 - p0)
382 t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 3 * (q0 - p0)
387 t2 = _mm_adds_epi8(t1, t2); // 3 * (q0 - p0) + hev(p1 - q1) + 4
388 SIGNED_SHIFT_N(t2, 3); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
390 *q0 = _mm_subs_epi8(*q0, t2); // q0 -= t2
395 SIGNED_SHIFT_N(t2, 3); // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
400 SIGNED_SHIFT_N(t3, 1); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 4
407 FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
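DoFilter4 condensed into a scalar sketch, assuming the standard VP8 normal
filter and that the pixel already passed the filter mask (clamp_s8 as above;
hev is the inverse of the not_hev mask):

    /* The +4/+3 deltas always adjust q0/p0; the extra (a1 + 1) >> 1 outer
     * delta adjusts q1/p1 only where the hev test failed. The SSE2 code
     * saturates 3 * (q0 - p0) at every step; this folds it into one clamp. */
    static void Filter4(int* p1, int* p0, int* q0, int* q1, int hev) {
      const int base = hev ? clamp_s8(*p1 - *q1) : 0;
      const int a = clamp_s8(base + 3 * (*q0 - *p0));
      const int a1 = clamp_s8(a + 4) >> 3;
      const int a2 = clamp_s8(a + 3) >> 3;
      *q0 = clamp_s8(*q0 - a1);     /* q0 -= t2 */
      *p0 = clamp_s8(*p0 + a2);     /* p0 += t2 */
      if (!hev) {
        const int a3 = (a1 + 1) >> 1;
        *q1 = clamp_s8(*q1 - a3);   /* q1 -= t3 */
        *p1 = clamp_s8(*p1 + a3);   /* p1 += t3 */
      }
    }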
410 // Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
412 __m128i* q0, __m128i* q1, __m128i *q2,
418 GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);
421 FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
424 GET_BASE_DELTA(*p1, *p0, *q0, *q1, a);
429 DO_SIMPLE_FILTER(*p0, *q0, f);
457 UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi);
461 FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
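The strong-filter update that ends in the UPDATE_2PIXELS calls, as a scalar
sketch assuming the VP8 27/18/9 weights (clamp_s8 as above; w stands for the
masked base delta, which the SSE2 code widens to 16-bit lanes so the
products fit; all values are in the sign-flipped domain these filters use):

    static void Filter6Tail(int w, int* p2, int* p1, int* p0,
                            int* q0, int* q1, int* q2) {
      const int a0 = (27 * w + 63) >> 7;  /* applied to p0/q0 */
      const int a1 = (18 * w + 63) >> 7;  /* applied to p1/q1 */
      const int a2 = ( 9 * w + 63) >> 7;  /* applied to p2/q2 */
      *p0 = clamp_s8(*p0 + a0);  *q0 = clamp_s8(*q0 - a0);
      *p1 = clamp_s8(*p1 + a1);  *q1 = clamp_s8(*q1 - a1);
      *p2 = clamp_s8(*p2 + a2);  *q2 = clamp_s8(*q2 - a2);
    }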
511 __m128i* q0, __m128i* q1) {
525 // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
528 Load8x4(r0, stride, p1, q0);
532 t2 = *q0;
535 // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
539 *q0 = _mm_unpacklo_epi64(t2, *q1);
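The epi64 unpacks splice the two Load8x4 halves into full 16-pixel columns.
A standalone demonstration of the splice (illustrative values):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    /* _mm_unpacklo_epi64(a, b) yields a's low 8 bytes then b's low 8
     * bytes: rows 0..7 from one Load8x4 call, rows 8..15 from the other. */
    int main(void) {
      const __m128i a = _mm_set_epi64x(0x1111111111111111LL, 0);
      const __m128i b = _mm_set_epi64x(0x3333333333333333LL,
                                       0x2222222222222222LL);
      uint8_t out[16];
      _mm_storeu_si128((__m128i*)out, _mm_unpacklo_epi64(a, b));
      printf("%02x %02x\n", out[0], out[8]);  /* prints: 00 22 */
      return 0;
    }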
553 __m128i* p0, __m128i* q0, __m128i* q1) {
562 // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
564 t1 = *q0;
565 *q0 = _mm_unpacklo_epi8(t1, *q1);
569 // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
571 *p0 = _mm_unpacklo_epi16(t1, *q0);
572 *q0 = _mm_unpackhi_epi16(t1, *q0);
582 Store4x4(q0, r0, stride);
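Store4x4 itself never touches q0, so the listing elides it; one plausible
shape for it, reconstructed from this call site (an assumption, not quoted
from the file):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* Write the four 32-bit lanes of *x to four consecutive rows. */
    static void Store4x4(__m128i* x, uint8_t* dst, int stride) {
      int i;
      for (i = 0; i < 4; ++i, dst += stride) {
        *(int32_t*)dst = _mm_cvtsi128_si32(*x);
        *x = _mm_srli_si128(*x, 4);
      }
    }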
596 __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
599 DoFilter2(&p1, &p0, &q0, &q1, thresh);
603 _mm_storeu_si128((__m128i*)p, q0);
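The loads and stores around these SimpleVFilter16 matches, sketched for
context (the p1/p0/q1 lines are an assumption consistent with the fragment;
DoFilter2 is the file's own routine):

    static void SimpleVFilter16Sketch(uint8_t* p, int stride, int thresh) {
      /* Four unaligned 16-pixel rows around the horizontal edge at p. */
      __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
      __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
      __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
      __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);
      DoFilter2(&p1, &p0, &q0, &q1, thresh);
      /* Only p0 and q0 change in the simple filter; store them back. */
      _mm_storeu_si128((__m128i*)&p[-stride], p0);
      _mm_storeu_si128((__m128i*)p, q0);
    }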
607 __m128i p1, p0, q0, q1;
611 Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
612 DoFilter2(&p1, &p0, &q0, &q1, thresh);
613 Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
672 #define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) {               \
677   NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes);                            \
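A scalar model of COMPLEX_FL_MASK, reusing NeedsFilterScalar from the sketch
above; max_inner_diff stands for the running maximum that MAX_DIFF2 builds:

    /* The complex mask: every interior difference must stay at or below
     * ithresh AND the simple NeedsFilter edge test must pass. */
    static int ComplexMask(int max_inner_diff, int p1, int p0, int q0, int q1,
                           int thresh, int ithresh) {
      return (max_inner_diff <= ithresh) &&
             NeedsFilterScalar(p1, p0, q0, q1, thresh);
    }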
686 __m128i p2, p1, p0, q0, q1, q2;
692 // Load q0, q1, q2, q3
693 LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
694 MAX_DIFF2(t1, q2, q1, q0, mask);
696 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
697 DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
703 _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
711 __m128i p3, p2, p1, p0, q0, q1, q2, q3;
717 Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3); // q0, q1, q2, q3
718 MAX_DIFF2(q3, q2, q1, q0, mask);
720 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
721 DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
724 Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
732 __m128i t1, t2, p1, p0, q0, q1;
741 // Load q0, q1, q2, q3
742 LOAD_H_EDGES4(p, stride, q0, q1, t1, t2);
743 MAX_DIFF2(t2, t1, q1, q0, mask);
745 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
746 DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
751 _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
761 __m128i t1, t2, p1, p0, q0, q1;
768 b += 4; // beginning of q0
769 Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3
770 MAX_DIFF2(t2, t1, q1, q0, mask);
772 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
773 DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
776 Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1);
786 __m128i t1, p2, p1, p0, q0, q1, q2;
792 // Load q0, q1, q2, q3
793 LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
794 MAX_DIFF2(t1, q2, q1, q0, mask);
796 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
797 DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
803 STOREUV(q0, u, v, 0 * stride);
811 __m128i p3, p2, p1, p0, q0, q1, q2, q3;
818 Load16x4(u, v, stride, &q0, &q1, &q2, &q3); // q0, q1, q2, q3
819 MAX_DIFF2(q3, q2, q1, q0, mask);
821 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
822 DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
825 Store16x4(u, v, stride, &q0, &q1, &q2, &q3);
831 __m128i t1, t2, p1, p0, q0, q1;
840 // Load q0, q1, q2, q3
841 LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
842 MAX_DIFF2(t2, t1, q1, q0, mask);
844 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
845 DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
850 STOREUV(q0, u, v, 0 * stride);
857 __m128i t1, t2, p1, p0, q0, q1;
861 u += 4; // beginning of q0
863 Load16x4(u, v, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3
864 MAX_DIFF2(t2, t1, q1, q0, mask);
866 COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
867 DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
871 Store16x4(u, v, stride, &p1, &p0, &q0, &q1);