/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include "./vpx_config.h"

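/* NEON implementation of the VP8 "simple" loop filter for vertical
 * (column) edges.  Each call to the edge filter below reads four pixel
 * columns (p1 p0 | q0 q1) for 16 rows, transposes them into registers,
 * applies the simple-filter adjustment to p0 and q0, and transposes the
 * two modified columns back to memory.  Two variants of the transposing
 * store/load helpers are provided, selected on the GCC version. */
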
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))  /* GCC 4.7+ */
/* Store the two bytes per row held in result (rows 0-7) and result2
 * (rows 8-15) as a 2-wide, 16-row block starting at dst. */
static INLINE void write_2x8(unsigned char *dst, int pitch,
                             const uint8x8x2_t result,
                             const uint8x8x2_t result2) {
  vst2_lane_u8(dst, result, 0);
  dst += pitch;
  vst2_lane_u8(dst, result, 1);
  dst += pitch;
  vst2_lane_u8(dst, result, 2);
  dst += pitch;
  vst2_lane_u8(dst, result, 3);
  dst += pitch;
  vst2_lane_u8(dst, result, 4);
  dst += pitch;
  vst2_lane_u8(dst, result, 5);
  dst += pitch;
  vst2_lane_u8(dst, result, 6);
  dst += pitch;
  vst2_lane_u8(dst, result, 7);
  dst += pitch;

  vst2_lane_u8(dst, result2, 0);
  dst += pitch;
  vst2_lane_u8(dst, result2, 1);
  dst += pitch;
  vst2_lane_u8(dst, result2, 2);
  dst += pitch;
  vst2_lane_u8(dst, result2, 3);
  dst += pitch;
  vst2_lane_u8(dst, result2, 4);
  dst += pitch;
  vst2_lane_u8(dst, result2, 5);
  dst += pitch;
  vst2_lane_u8(dst, result2, 6);
  dst += pitch;
  vst2_lane_u8(dst, result2, 7);
}
#else
static INLINE void write_2x4(unsigned char *dst, int pitch,
                             const uint8x8x2_t result) {
    /*
     * uint8x8x2_t result
    00 01 02 03 | 04 05 06 07
    10 11 12 13 | 14 15 16 17
    ---
    * after vtrn_u8
    00 10 02 12 | 04 14 06 16
    01 11 03 13 | 05 15 07 17
    */
    const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0],
                                       result.val[1]);
    const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
    const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
    vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
    dst += pitch;
    vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
}

static INLINE void write_2x8(unsigned char *dst, int pitch,
                             const uint8x8x2_t result,
                             const uint8x8x2_t result2) {
  write_2x4(dst, pitch, result);
  dst += pitch * 8;
  write_2x4(dst, pitch, result2);
}
#endif


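/* read_4x8() loads a 4-wide, 8-row block of pixels starting at src and
 * returns it transposed: x.val[n] holds column n of the block for all
 * eight rows. */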
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))  /* GCC 4.7+ */
static INLINE
uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
    x = vld4_lane_u8(src, x, 0);
    src += pitch;
    x = vld4_lane_u8(src, x, 1);
    src += pitch;
    x = vld4_lane_u8(src, x, 2);
    src += pitch;
    x = vld4_lane_u8(src, x, 3);
    src += pitch;
    x = vld4_lane_u8(src, x, 4);
    src += pitch;
    x = vld4_lane_u8(src, x, 5);
    src += pitch;
    x = vld4_lane_u8(src, x, 6);
    src += pitch;
    x = vld4_lane_u8(src, x, 7);
    return x;
}
#else
static INLINE
uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
    const uint8x8_t a = vld1_u8(src);
    const uint8x8_t b = vld1_u8(src + pitch * 1);
    const uint8x8_t c = vld1_u8(src + pitch * 2);
    const uint8x8_t d = vld1_u8(src + pitch * 3);
    const uint8x8_t e = vld1_u8(src + pitch * 4);
    const uint8x8_t f = vld1_u8(src + pitch * 5);
    const uint8x8_t g = vld1_u8(src + pitch * 6);
    const uint8x8_t h = vld1_u8(src + pitch * 7);
    const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
                                          vreinterpret_u32_u8(e));
    const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
                                          vreinterpret_u32_u8(f));
    const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
                                          vreinterpret_u32_u8(g));
    const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
                                          vreinterpret_u32_u8(h));
    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
                                          vreinterpret_u16_u32(r26_u32.val[0]));
    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
                                          vreinterpret_u16_u32(r37_u32.val[0]));
    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
                                       vreinterpret_u8_u16(r13_u16.val[0]));
    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
                                       vreinterpret_u8_u16(r13_u16.val[1]));
    /*
     * after vtrn_u32
    00 01 02 03 | 40 41 42 43
    10 11 12 13 | 50 51 52 53
    20 21 22 23 | 60 61 62 63
    30 31 32 33 | 70 71 72 73
    ---
    * after vtrn_u16
    00 01 20 21 | 40 41 60 61
    02 03 22 23 | 42 43 62 63
    10 11 30 31 | 50 51 70 71
    12 13 32 33 | 52 53 72 73

    00 01 20 21 | 40 41 60 61
    10 11 30 31 | 50 51 70 71
    02 03 22 23 | 42 43 62 63
    12 13 32 33 | 52 53 72 73
    ---
    * after vtrn_u8
    00 10 20 30 | 40 50 60 70
    01 11 21 31 | 41 51 61 71
    02 12 22 32 | 42 52 62 72
    03 13 23 33 | 43 53 63 73
    */
    x.val[0] = r01_u8.val[0];
    x.val[1] = r01_u8.val[1];
    x.val[2] = r23_u8.val[0];
    x.val[3] = r23_u8.val[1];

    return x;
}
#endif

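/* For reference, a scalar sketch of what the NEON routine below computes
 * for each of the 16 rows (illustration only; clamp() here denotes
 * saturation to the signed 8-bit range and is not a function defined in
 * this file):
 *
 *   if (2 * |p0 - q0| + |p1 - q1| / 2 <= *blimit) {
 *     int filter = clamp(3 * (q0 - p0) + clamp(p1 - q1));
 *     q0 -= clamp(filter + 4) >> 3;   // Filter1
 *     p0 += clamp(filter + 3) >> 3;   // Filter2
 *   }
 *
 * where p1, p0, q0, q1 are the pixels at s - 2 .. s + 1 after biasing by
 * 0x80 into the signed domain; the results are un-biased before storing.
 */
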
static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
        unsigned char *s,
        int p,
        const unsigned char *blimit) {
    unsigned char *src1;
    uint8x16_t qblimit, q0u8;
    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
    int16x8_t q2s16, q13s16, q11s16;
    int8x8_t d28s8, d29s8;
    int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
    uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
    uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
    uint8x8x2_t d2u8x2;  // d12, d13
    uint8x8x2_t d3u8x2;  // d14, d15

    qblimit = vdupq_n_u8(*blimit);

    src1 = s - 2;
    d0u8x4 = read_4x8(src1, p, d0u8x4);
    src1 += p * 8;
    d1u8x4 = read_4x8(src1, p, d1u8x4);

    q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10
    q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12
    q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11
    q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13

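    /* After the combines, each q register holds one pixel column for all
     * 16 rows:
     *   q3u8 = p1 (column s - 2),  q5u8 = p0 (column s - 1),
     *   q4u8 = q0 (column s),      q6u8 = q1 (column s + 1). */

    /* Filter mask: a row is filtered when
     *   2 * |p0 - q0| + |p1 - q1| / 2 <= blimit.
     * The vcgeq_u8() against qblimit below turns this test into an
     * all-ones/all-zeros byte mask in q15u8. */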
    q15u8 = vabdq_u8(q5u8, q4u8);
    q14u8 = vabdq_u8(q3u8, q6u8);

    q15u8 = vqaddq_u8(q15u8, q15u8);
    q14u8 = vshrq_n_u8(q14u8, 1);
    q0u8 = vdupq_n_u8(0x80);
    q11s16 = vdupq_n_s16(3);
    q15u8 = vqaddq_u8(q15u8, q14u8);

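    /* Flip the sign bit (XOR 0x80) so the pixels can be treated as signed
     * 8-bit values in the filter arithmetic. */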
    q3u8 = veorq_u8(q3u8, q0u8);
    q4u8 = veorq_u8(q4u8, q0u8);
    q5u8 = veorq_u8(q5u8, q0u8);
    q6u8 = veorq_u8(q6u8, q0u8);

    q15u8 = vcgeq_u8(qblimit, q15u8);

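    /* Common filter value: filter = clamp(3 * (q0 - p0) + clamp(p1 - q1)),
     * computed in widened 16-bit lanes, narrowed with saturation into
     * q14s8 and then zeroed outside the mask.  q11u8/q12u8 are set to the
     * constants 3 and 4 for the rounding step that follows. */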
    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
                     vget_low_s8(vreinterpretq_s8_u8(q5u8)));
    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
                      vget_high_s8(vreinterpretq_s8_u8(q5u8)));

    q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
                      vreinterpretq_s8_u8(q6u8));

    q2s16 = vmulq_s16(q2s16, q11s16);
    q13s16 = vmulq_s16(q13s16, q11s16);

    q11u8 = vdupq_n_u8(3);
    q12u8 = vdupq_n_u8(4);

    q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
    q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));

    d28s8 = vqmovn_s16(q2s16);
    d29s8 = vqmovn_s16(q13s16);
    q14s8 = vcombine_s8(d28s8, d29s8);

    q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));

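    /* Filter2 = clamp(filter + 3) >> 3 (q2s8) and
     * Filter1 = clamp(filter + 4) >> 3 (q14s8), using arithmetic shifts. */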
    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
    q2s8 = vshrq_n_s8(q2s8, 3);
    q14s8 = vshrq_n_s8(q3s8, 3);

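    /* Apply the adjustment with signed saturation: p0 += Filter2,
     * q0 -= Filter1. */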
    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);

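    /* Undo the 0x80 bias to return the filtered pixels to the unsigned
     * domain. */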
    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);

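    /* Pack the filtered (p0, q0) pairs: d2u8x2 covers rows 0-7, d3u8x2
     * rows 8-15.  write_2x8() transposes them back into the two pixel
     * columns starting at s - 1. */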
    d2u8x2.val[0] = vget_low_u8(q6u8);   // d12
    d2u8x2.val[1] = vget_low_u8(q7u8);   // d14
    d3u8x2.val[0] = vget_high_u8(q6u8);  // d13
    d3u8x2.val[1] = vget_high_u8(q7u8);  // d15

    src1 = s - 1;
    write_2x8(src1, p, d2u8x2, d3u8x2);
}

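/* Applies the simple filter to the three vertical block edges inside a
 * macroblock, i.e. at columns 4, 8 and 12 of the 16-pixel-wide block. */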
void vp8_loop_filter_bvs_neon(
        unsigned char *y_ptr,
        int y_stride,
        const unsigned char *blimit) {
    y_ptr += 4;
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    y_ptr += 4;
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    y_ptr += 4;
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    return;
}

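/* Applies the simple filter to the left (macroblock) vertical edge. */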
void vp8_loop_filter_mbvs_neon(
        unsigned char *y_ptr,
        int y_stride,
        const unsigned char *blimit) {
    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
    return;
}