// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// ARM NEON version of dsp functions and loop filtering.
//
// Authors: Somnath Banerjee (somnath@google.com)
//          Johann Koenig (johannkoenig@google.com)

#include "./dsp.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#if defined(WEBP_USE_NEON)

#include "../dec/vp8i.h"

#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",                  \
              "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"

#define FLIP_SIGN_BIT2(a, b, s)                                                \
  "veor     " #a "," #a "," #s "               \n"                             \
  "veor     " #b "," #b "," #s "               \n"

#define FLIP_SIGN_BIT4(a, b, c, d, s)                                          \
  FLIP_SIGN_BIT2(a, b, s)                                                      \
  FLIP_SIGN_BIT2(c, d, s)
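
// XORing with 0x80 (the sign bit, see DO_FILTER2 below) maps an unsigned
// pixel x in [0, 255] to the signed value x - 128 in [-128, 127], so the
// filter arithmetic can use the saturating signed ops. Scalar equivalent:
//   (int8_t)(x ^ 0x80) == x - 128
// Applying the same XOR again converts back to unsigned.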

#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask)                             \
  "vabd.u8    q15," #p0 "," #q0 "         \n"  /* abs(p0 - q0) */              \
  "vabd.u8    q14," #p1 "," #q1 "         \n"  /* abs(p1 - q1) */              \
  "vqadd.u8   q15, q15, q15               \n"  /* abs(p0 - q0) * 2 */          \
  "vshr.u8    q14, q14, #1                \n"  /* abs(p1 - q1) / 2 */          \
  "vqadd.u8   q15, q15, q14     \n"  /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
  "vdup.8     q14, " #thresh "            \n"                                  \
  "vcge.u8   " #mask ", q14, q15          \n"  /* mask = (value <= thresh) */

#define GET_BASE_DELTA(p1, p0, q0, q1, o)                                      \
  "vqsub.s8   q15," #q0 "," #p0 "         \n"  /* (q0 - p0) */                 \
  "vqsub.s8  " #o "," #p1 "," #q1 "       \n"  /* (p1 - q1) */                 \
  "vqadd.s8  " #o "," #o ", q15           \n"  /* (p1 - q1) + 1 * (q0 - p0) */ \
  "vqadd.s8  " #o "," #o ", q15           \n"  /* (p1 - q1) + 2 * (q0 - p0) */ \
  "vqadd.s8  " #o "," #o ", q15           \n"  /* (p1 - q1) + 3 * (q0 - p0) */

#define DO_SIMPLE_FILTER(p0, q0, fl)                                           \
  "vmov.i8    q15, #0x03                  \n"                                  \
  "vqadd.s8   q15, q15, " #fl "           \n"  /* filter1 = filter + 3 */      \
  "vshr.s8    q15, q15, #3                \n"  /* filter1 >> 3 */              \
  "vqadd.s8  " #p0 "," #p0 ", q15         \n"  /* p0 += filter1 */             \
                                                                               \
  "vmov.i8    q15, #0x04                  \n"                                  \
  "vqadd.s8   q15, q15, " #fl "           \n"  /* filter2 = filter + 4 */      \
  "vshr.s8    q15, q15, #3                \n"  /* filter2 >> 3 */              \
  "vqsub.s8  " #q0 "," #q0 ", q15         \n"  /* q0 -= filter2 */

// Applies filter on 2 pixels (p0 and q0)
#define DO_FILTER2(p1, p0, q0, q1, thresh)                                     \
  NEEDS_FILTER(p1, p0, q0, q1, thresh, q9)     /* filter mask in q9 */         \
  "vmov.i8    q10, #0x80                  \n"  /* sign bit */                  \
  FLIP_SIGN_BIT4(p1, p0, q0, q1, q10)          /* convert to signed value */   \
  GET_BASE_DELTA(p1, p0, q0, q1, q11)          /* get filter level */          \
  "vand       q9, q9, q11                 \n"  /* apply filter mask */         \
  DO_SIMPLE_FILTER(p0, q0, q9)                 /* apply filter */              \
  FLIP_SIGN_BIT2(p0, q0, q10)
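
// Ignoring the per-step saturation of the vq* instructions, DO_FILTER2
// reduces to the following scalar routine on the sign-flipped (int8) pixels,
// with lanes that fail NEEDS_FILTER zeroed out of the delta by the vand
// (a sketch, assuming a hypothetical clamp_s8 helper; illustration only):
/*
static int clamp_s8(int v) {  // what vqadd.s8 / vqsub.s8 do at each step
  return v < -128 ? -128 : (v > 127 ? 127 : v);
}
static void DoFilter2(int8_t* p1, int8_t* p0, int8_t* q0, int8_t* q1) {
  const int a  = clamp_s8(3 * (*q0 - *p0) + (*p1 - *q1));  // GET_BASE_DELTA
  const int f1 = clamp_s8(a + 3) >> 3;                     // DO_SIMPLE_FILTER
  const int f2 = clamp_s8(a + 4) >> 3;
  *p0 = (int8_t)clamp_s8(*p0 + f1);
  *q0 = (int8_t)clamp_s8(*q0 - f2);
}
*/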

// Load/Store vertical edge
#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride)                                \
  "vld4.8   {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \
  "vld4.8   {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \
  "vld4.8   {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \
  "vld4.8   {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \
  "vld4.8   {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \
  "vld4.8   {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \
  "vld4.8   {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
  "vld4.8   {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"

#define STORE8x2(c1, c2, p, stride)                                            \
  "vst2.8   {" #c1"[0], " #c2"[0]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[1], " #c2"[1]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[2], " #c2"[2]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[3], " #c2"[3]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[4], " #c2"[4]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[5], " #c2"[5]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[6], " #c2"[6]}," #p "," #stride " \n"                      \
  "vst2.8   {" #c1"[7], " #c2"[7]}," #p "," #stride " \n"

//-----------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) {
  __asm__ volatile (
    "sub        %[p], %[p], %[stride], lsl #1  \n"  // p -= 2 * stride

    "vld1.u8    {q1}, [%[p]], %[stride]        \n"  // p1
    "vld1.u8    {q2}, [%[p]], %[stride]        \n"  // p0
    "vld1.u8    {q3}, [%[p]], %[stride]        \n"  // q0
    "vld1.u8    {q4}, [%[p]]                   \n"  // q1

    DO_FILTER2(q1, q2, q3, q4, %[thresh])

    "sub        %[p], %[p], %[stride], lsl #1  \n"  // p -= 2 * stride

    "vst1.u8    {q2}, [%[p]], %[stride]        \n"  // store op0
    "vst1.u8    {q3}, [%[p]]                   \n"  // store oq0
    : [p] "+r"(p)
    : [stride] "r"(stride), [thresh] "r"(thresh)
    : "memory", QRegs
  );
}

static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) {
  __asm__ volatile (
    "sub        r4, %[p], #2                   \n"  // base1 = p - 2
    "lsl        r6, %[stride], #1              \n"  // r6 = 2 * stride
    "add        r5, r4, %[stride]              \n"  // base2 = base1 + stride

    LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
    LOAD8x4(d6, d7, d8, d9, [r4], [r5], r6)
    "vswp       d3, d6                         \n"  // p1:q1 p0:q3
    "vswp       d5, d8                         \n"  // q0:q2 q1:q4
    "vswp       q2, q3                         \n"  // p1:q1 p0:q2 q0:q3 q1:q4

    DO_FILTER2(q1, q2, q3, q4, %[thresh])

    "sub        %[p], %[p], #1                 \n"  // p - 1

    "vswp       d5, d6                         \n"
    STORE8x2(d4, d5, [%[p]], %[stride])
    STORE8x2(d6, d7, [%[p]], %[stride])

    : [p] "+r"(p)
    : [stride] "r"(stride), [thresh] "r"(thresh)
    : "memory", "r4", "r5", "r6", QRegs
  );
}

static void SimpleVFilter16iNEON(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16NEON(p, stride, thresh);
  }
}

static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16NEON(p, stride, thresh);
  }
}
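
// The "i" variants filter the three inner edges of a 16x16 macroblock:
// the pointer first advances to row (or column) 4, 8 and 12, and the
// corresponding 16-pixel edge filter is applied at each.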

//-----------------------------------------------------------------------------
// Inverse transforms (Paragraph 14.4)

static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
  const int kBPS = BPS;
  const int16_t constants[] = {20091, 17734, 0, 0};
  /* kC1, kC2. Padded because vld1.16 loads 8 bytes.
   * Technically these are unsigned, but vqdmulh is only available for signed
   * operands. vqdmulh returns the high half (effectively >> 16) but also
   * doubles the value, changing the >> 16 to >> 15 and requiring an extra
   * >> 1. We use this to our advantage with kC2: its canonical value is
   * 35468, whose high bit is set, so treating it as signed would give
   * incorrect results. We avoid this by shifting it right by 1 here, which
   * clears the high bit; combined with the doubling effect of vqdmulh we get
   * the desired >> 16. This cannot be applied to kC1 because its lowest bit
   * is set, so shifting that constant down would lose precision.
   */

  /* libwebp uses a trick to avoid an extra addition that libvpx does.
   * Instead of:
   *   temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
   * libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes
   * the same signedness issue with kC1 and vqdmulh that we work around for
   * kC2 by down-shifting it.
   */
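
/* Concretely, with the canonical constants (a sketch; the portable C path
 * described above uses kC1 = 20091 + (1 << 16), kC2 = 35468 and
 * MUL(a, b) = ((a) * (b)) >> 16):
 *
 *   vqdmulh.s16 computes (x * c * 2) >> 16, saturated. With the halved
 *   constant c = 35468 / 2 = 17734:
 *     (x * 17734 * 2) >> 16  ==  (x * 35468) >> 16          // kC2 directly
 *   With c = 20091, followed by the ">> 1" and the add of x done below:
 *     x + (((x * 20091 * 2) >> 16) >> 1)  ~=  (x * (65536 + 20091)) >> 16
 */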

  /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */
  __asm__ volatile (
    "vld1.16         {q1, q2}, [%[in]]           \n"
    "vld1.16         {d0}, [%[constants]]        \n"

    /* d2: in[0]
     * d3: in[8]
     * d4: in[4]
     * d5: in[12]
     */
    "vswp            d3, d4                      \n"

    /* q8 = {in[4], in[12]} * kC1 * 2 >> 16
     * q9 = {in[4], in[12]} * kC2 >> 16
     */
    "vqdmulh.s16     q8, q2, d0[0]               \n"
    "vqdmulh.s16     q9, q2, d0[1]               \n"

    /* d22 = a = in[0] + in[8]
     * d23 = b = in[0] - in[8]
     */
    "vqadd.s16       d22, d2, d3                 \n"
    "vqsub.s16       d23, d2, d3                 \n"

    /* The multiplication should be x * kC1 >> 16.
     * However, with vqdmulh we get x * kC1 * 2 >> 16
     * (multiply, double, return high half).
     * We avoided this for kC2 by pre-shifting the constant.
     * q8 = in[4]/in[12] * kC1 >> 16
     */
    "vshr.s16        q8, q8, #1                  \n"

    /* Add {in[4], in[12]} back after the multiplication. This is handled by
     * adding 1 << 16 to kC1 in the libwebp C code.
     */
    "vqadd.s16       q8, q2, q8                  \n"

    /* d20 = c = in[4]*kC2 - in[12]*kC1
     * d21 = d = in[4]*kC1 + in[12]*kC2
     */
    "vqsub.s16       d20, d18, d17               \n"
    "vqadd.s16       d21, d19, d16               \n"

    /* d2 = tmp[0] = a + d
     * d3 = tmp[1] = b + c
     * d4 = tmp[2] = b - c
     * d5 = tmp[3] = a - d
     */
    "vqadd.s16       d2, d22, d21                \n"
    "vqadd.s16       d3, d23, d20                \n"
    "vqsub.s16       d4, d23, d20                \n"
    "vqsub.s16       d5, d22, d21                \n"

    /* The pair of vzip.16 transposes the 4x4 block held in q1/q2. */
    "vzip.16         q1, q2                      \n"
    "vzip.16         q1, q2                      \n"

    /* As with the load: d2 = tmp[0], d3 = tmp[8], d4 = tmp[4], d5 = tmp[12] */
    "vswp            d3, d4                      \n"

    /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
     * q9 = {tmp[4], tmp[12]} * kC2 >> 16
     */
    "vqdmulh.s16     q8, q2, d0[0]               \n"
    "vqdmulh.s16     q9, q2, d0[1]               \n"

    /* d22 = a = tmp[0] + tmp[8]
     * d23 = b = tmp[0] - tmp[8]
     */
    "vqadd.s16       d22, d2, d3                 \n"
    "vqsub.s16       d23, d2, d3                 \n"

    /* See the long-winded explanation above. */
    "vshr.s16        q8, q8, #1                  \n"
    "vqadd.s16       q8, q2, q8                  \n"

    /* d20 = c = tmp[4]*kC2 - tmp[12]*kC1
     * d21 = d = tmp[4]*kC1 + tmp[12]*kC2
     */
    "vqsub.s16       d20, d18, d17               \n"
    "vqadd.s16       d21, d19, d16               \n"

    /* d2 = tmp[0] = a + d
     * d3 = tmp[1] = b + c
     * d4 = tmp[2] = b - c
     * d5 = tmp[3] = a - d
     */
    "vqadd.s16       d2, d22, d21                \n"
    "vqadd.s16       d3, d23, d20                \n"
    "vqsub.s16       d4, d23, d20                \n"
    "vqsub.s16       d5, d22, d21                \n"

    /* Load the four 4-byte rows of the destination block. */
    "vld1.32         d6[0], [%[dst]], %[kBPS]    \n"
    "vld1.32         d6[1], [%[dst]], %[kBPS]    \n"
    "vld1.32         d7[0], [%[dst]], %[kBPS]    \n"
    "vld1.32         d7[1], [%[dst]], %[kBPS]    \n"

    "sub         %[dst], %[dst], %[kBPS], lsl #2 \n"  /* rewind dst */

    /* (val + 4) >> 3 (vrshr is a rounding shift) */
    "vrshr.s16       d2, d2, #3                  \n"
    "vrshr.s16       d3, d3, #3                  \n"
    "vrshr.s16       d4, d4, #3                  \n"
    "vrshr.s16       d5, d5, #3                  \n"

    /* Transpose back so d2..d5 hold the four output rows. */
    "vzip.16         q1, q2                      \n"
    "vzip.16         q1, q2                      \n"

    /* Must accumulate before saturating */
    "vmovl.u8        q8, d6                      \n"
    "vmovl.u8        q9, d7                      \n"

    "vqadd.s16       q1, q1, q8                  \n"
    "vqadd.s16       q2, q2, q9                  \n"

    "vqmovun.s16     d0, q1                      \n"
    "vqmovun.s16     d1, q2                      \n"

    "vst1.32         d0[0], [%[dst]], %[kBPS]    \n"
    "vst1.32         d0[1], [%[dst]], %[kBPS]    \n"
    "vst1.32         d1[0], [%[dst]], %[kBPS]    \n"
    "vst1.32         d1[1], [%[dst]]             \n"

    : [in] "+r"(in), [dst] "+r"(dst)  /* modified registers */
    : [kBPS] "r"(kBPS), [constants] "r"(constants)  /* constants */
    : "memory", "q0", "q1", "q2", "q3",  /* q3 holds the loaded dst rows */
      "q8", "q9", "q10", "q11"  /* clobbered */
  );
}

static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) {
  TransformOneNEON(in, dst);
  if (do_two) {
    TransformOneNEON(in + 16, dst + 4);
  }
}
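
// The two 4x4 sub-blocks handled here are adjacent in the destination rows
// (4 pixels apart) while their coefficients are stored back to back
// (16 int16_t apart), hence in + 16 and dst + 4.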

static void TransformWHT(const int16_t* in, int16_t* out) {
  const int kStep = 32;  // kStep is a byte offset: each vst1.16 below stores
                         // one 16-bit lane and then advances the pointer by
                         // 32 bytes, i.e. 16 int16_t coefficients.
  __asm__ volatile (
    // part 1: vertical pass
    // load data into q0, q1
    "vld1.16         {q0, q1}, [%[in]]           \n"

    "vaddl.s16       q2, d0, d3                  \n" // a0 = in[0] + in[12]
    "vaddl.s16       q3, d1, d2                  \n" // a1 = in[4] + in[8]
    "vsubl.s16       q4, d1, d2                  \n" // a2 = in[4] - in[8]
    "vsubl.s16       q5, d0, d3                  \n" // a3 = in[0] - in[12]

    "vadd.s32        q0, q2, q3                  \n" // tmp[0] = a0 + a1
    "vsub.s32        q2, q2, q3                  \n" // tmp[8] = a0 - a1
    "vadd.s32        q1, q5, q4                  \n" // tmp[4] = a3 + a2
    "vsub.s32        q3, q5, q4                  \n" // tmp[12] = a3 - a2

    // Transpose
    // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
    // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
    "vswp            d1, d4                      \n" // vtrn.64 q0, q2
    "vswp            d3, d6                      \n" // vtrn.64 q1, q3
    "vtrn.32         q0, q1                      \n"
    "vtrn.32         q2, q3                      \n"

    // part 2: horizontal pass, with the rounding bias folded into dc
    "vmov.s32        q4, #3                      \n" // dc = 3
    "vadd.s32        q0, q0, q4                  \n" // dc = tmp[0] + 3
    "vadd.s32        q6, q0, q3                  \n" // a0 = dc + tmp[3]
    "vadd.s32        q7, q1, q2                  \n" // a1 = tmp[1] + tmp[2]
    "vsub.s32        q8, q1, q2                  \n" // a2 = tmp[1] - tmp[2]
    "vsub.s32        q9, q0, q3                  \n" // a3 = dc - tmp[3]

    "vadd.s32        q0, q6, q7                  \n"
    "vshrn.s32       d0, q0, #3                  \n" // (a0 + a1) >> 3
    "vadd.s32        q1, q9, q8                  \n"
    "vshrn.s32       d1, q1, #3                  \n" // (a3 + a2) >> 3
    "vsub.s32        q2, q6, q7                  \n"
    "vshrn.s32       d2, q2, #3                  \n" // (a0 - a1) >> 3
    "vsub.s32        q3, q9, q8                  \n"
    "vshrn.s32       d3, q3, #3                  \n" // (a3 - a2) >> 3

    // store the results: one lane per DC position, 16 coefficients apart
    "vst1.16         d0[0], [%[out]], %[kStep]   \n"
    "vst1.16         d1[0], [%[out]], %[kStep]   \n"
    "vst1.16         d2[0], [%[out]], %[kStep]   \n"
    "vst1.16         d3[0], [%[out]], %[kStep]   \n"
    "vst1.16         d0[1], [%[out]], %[kStep]   \n"
    "vst1.16         d1[1], [%[out]], %[kStep]   \n"
    "vst1.16         d2[1], [%[out]], %[kStep]   \n"
    "vst1.16         d3[1], [%[out]], %[kStep]   \n"
    "vst1.16         d0[2], [%[out]], %[kStep]   \n"
    "vst1.16         d1[2], [%[out]], %[kStep]   \n"
    "vst1.16         d2[2], [%[out]], %[kStep]   \n"
    "vst1.16         d3[2], [%[out]], %[kStep]   \n"
    "vst1.16         d0[3], [%[out]], %[kStep]   \n"
    "vst1.16         d1[3], [%[out]], %[kStep]   \n"
    "vst1.16         d2[3], [%[out]], %[kStep]   \n"
    "vst1.16         d3[3], [%[out]], %[kStep]   \n"

    : [out] "+r"(out)  // modified registers
    : [in] "r"(in), [kStep] "r"(kStep)  // constants
    : "memory", "q0", "q1", "q2", "q3", "q4",
      "q5", "q6", "q7", "q8", "q9"  // clobbered
  );
}
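
/* Scalar sketch of the same inverse Walsh-Hadamard transform (a reference
 * illustration only; it mirrors the asm comments above, with each result
 * scattered to the DC slot of one of the 16 coefficient blocks):
 *
 * static void TransformWHT_C(const int16_t* in, int16_t* out) {
 *   int tmp[16], i;
 *   for (i = 0; i < 4; ++i) {                    // vertical pass
 *     const int a0 = in[0 + i] + in[12 + i];
 *     const int a1 = in[4 + i] + in[8 + i];
 *     const int a2 = in[4 + i] - in[8 + i];
 *     const int a3 = in[0 + i] - in[12 + i];
 *     tmp[0 + i] = a0 + a1;  tmp[8 + i] = a0 - a1;
 *     tmp[4 + i] = a3 + a2;  tmp[12 + i] = a3 - a2;
 *   }
 *   for (i = 0; i < 4; ++i) {                    // horizontal pass
 *     const int dc = tmp[0 + i * 4] + 3;         // w/ rounding bias
 *     const int a0 = dc             + tmp[3 + i * 4];
 *     const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
 *     const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
 *     const int a3 = dc             - tmp[3 + i * 4];
 *     out[0]  = (a0 + a1) >> 3;
 *     out[16] = (a3 + a2) >> 3;
 *     out[32] = (a0 - a1) >> 3;
 *     out[48] = (a3 - a2) >> 3;
 *     out += 64;
 *   }
 * }
 */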

#endif   // WEBP_USE_NEON

//-----------------------------------------------------------------------------
// Entry point

extern void VP8DspInitNEON(void);

void VP8DspInitNEON(void) {
#if defined(WEBP_USE_NEON)
  VP8Transform = TransformTwoNEON;
  VP8TransformWHT = TransformWHT;

  VP8SimpleVFilter16 = SimpleVFilter16NEON;
  VP8SimpleHFilter16 = SimpleHFilter16NEON;
  VP8SimpleVFilter16i = SimpleVFilter16iNEON;
  VP8SimpleHFilter16i = SimpleHFilter16iNEON;
#endif   // WEBP_USE_NEON
}

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif