/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"

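// Load rows |first| and |second| of the 32x8 transposed coefficient buffer
// (8 int32_t values per row) into two vector pairs.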
static INLINE void load_from_transformed(const int32_t *const trans_buf,
                                         const int first, const int second,
                                         int32x4x2_t *const q0,
                                         int32x4x2_t *const q1) {
  q0->val[0] = vld1q_s32(trans_buf + first * 8);
  q0->val[1] = vld1q_s32(trans_buf + first * 8 + 4);
  q1->val[0] = vld1q_s32(trans_buf + second * 8);
  q1->val[1] = vld1q_s32(trans_buf + second * 8 + 4);
}

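// Load rows |first| and |second| of the current 8-wide band from the
// intermediate output buffer, whose rows are 32 int32_t values apart.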
static INLINE void load_from_output(const int32_t *const out, const int first,
                                    const int second, int32x4x2_t *const q0,
                                    int32x4x2_t *const q1) {
  q0->val[0] = vld1q_s32(out + first * 32);
  q0->val[1] = vld1q_s32(out + first * 32 + 4);
  q1->val[0] = vld1q_s32(out + second * 32);
  q1->val[1] = vld1q_s32(out + second * 32 + 4);
}

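// Store two 8-wide rows of the current band back to the intermediate output
// buffer (row stride of 32 int32_t values).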
static INLINE void store_in_output(int32_t *const out, const int first,
                                   const int second, const int32x4x2_t q0,
                                   const int32x4x2_t q1) {
  vst1q_s32(out + first * 32, q0.val[0]);
  vst1q_s32(out + first * 32 + 4, q0.val[1]);
  vst1q_s32(out + second * 32, q1.val[0]);
  vst1q_s32(out + second * 32 + 4, q1.val[1]);
}

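// Round four 8-wide results by 6 bits, add them to the destination rows at
// p1, p1 + stride, p2 - stride and p2, clamp the sums to [0, max] and store
// them back. |max| is (1 << bd) - 1.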
static INLINE void highbd_store_combine_results(
    uint16_t *p1, uint16_t *p2, const int stride, const int32x4x2_t q0,
    const int32x4x2_t q1, const int32x4x2_t q2, const int32x4x2_t q3,
    const int16x8_t max) {
  int16x8_t o[4];
  uint16x8_t d[4];

  d[0] = vld1q_u16(p1);
  p1 += stride;
  d[1] = vld1q_u16(p1);
  d[3] = vld1q_u16(p2);
  p2 -= stride;
  d[2] = vld1q_u16(p2);

  o[0] = vcombine_s16(vrshrn_n_s32(q0.val[0], 6), vrshrn_n_s32(q0.val[1], 6));
  o[1] = vcombine_s16(vrshrn_n_s32(q1.val[0], 6), vrshrn_n_s32(q1.val[1], 6));
  o[2] = vcombine_s16(vrshrn_n_s32(q2.val[0], 6), vrshrn_n_s32(q2.val[1], 6));
  o[3] = vcombine_s16(vrshrn_n_s32(q3.val[0], 6), vrshrn_n_s32(q3.val[1], 6));

  o[0] = vqaddq_s16(o[0], vreinterpretq_s16_u16(d[0]));
  o[1] = vqaddq_s16(o[1], vreinterpretq_s16_u16(d[1]));
  o[2] = vqaddq_s16(o[2], vreinterpretq_s16_u16(d[2]));
  o[3] = vqaddq_s16(o[3], vreinterpretq_s16_u16(d[3]));
  o[0] = vminq_s16(o[0], max);
  o[1] = vminq_s16(o[1], max);
  o[2] = vminq_s16(o[2], max);
  o[3] = vminq_s16(o[3], max);
  d[0] = vqshluq_n_s16(o[0], 0);
  d[1] = vqshluq_n_s16(o[1], 0);
  d[2] = vqshluq_n_s16(o[2], 0);
  d[3] = vqshluq_n_s16(o[3], 0);

  vst1q_u16(p1, d[1]);
  p1 -= stride;
  vst1q_u16(p1, d[0]);
  vst1q_u16(p2, d[2]);
  p2 += stride;
  vst1q_u16(p2, d[3]);
}

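// Butterfly rotation with rounding:
//   qOut0 = (qIn0 * first_const - qIn1 * second_const) >> DCT_CONST_BITS
//   qOut1 = (qIn0 * second_const + qIn1 * first_const) >> DCT_CONST_BITS
// The 32x32-bit products are accumulated in 64 bits before narrowing.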
static INLINE void do_butterfly(const int32x4x2_t qIn0, const int32x4x2_t qIn1,
                                const int32_t first_const,
                                const int32_t second_const,
                                int32x4x2_t *const qOut0,
                                int32x4x2_t *const qOut1) {
  int64x2x2_t q[4];
  int32x2_t d[6];

  // Note: using v{mul, mla, mls}l_n_s32 here slows down 35% with gcc 4.9.
  d[4] = vdup_n_s32(first_const);
  d[5] = vdup_n_s32(second_const);

  q[0].val[0] = vmull_s32(vget_low_s32(qIn0.val[0]), d[4]);
  q[0].val[1] = vmull_s32(vget_high_s32(qIn0.val[0]), d[4]);
  q[1].val[0] = vmull_s32(vget_low_s32(qIn0.val[1]), d[4]);
  q[1].val[1] = vmull_s32(vget_high_s32(qIn0.val[1]), d[4]);
  q[0].val[0] = vmlsl_s32(q[0].val[0], vget_low_s32(qIn1.val[0]), d[5]);
  q[0].val[1] = vmlsl_s32(q[0].val[1], vget_high_s32(qIn1.val[0]), d[5]);
  q[1].val[0] = vmlsl_s32(q[1].val[0], vget_low_s32(qIn1.val[1]), d[5]);
  q[1].val[1] = vmlsl_s32(q[1].val[1], vget_high_s32(qIn1.val[1]), d[5]);

  q[2].val[0] = vmull_s32(vget_low_s32(qIn0.val[0]), d[5]);
  q[2].val[1] = vmull_s32(vget_high_s32(qIn0.val[0]), d[5]);
  q[3].val[0] = vmull_s32(vget_low_s32(qIn0.val[1]), d[5]);
  q[3].val[1] = vmull_s32(vget_high_s32(qIn0.val[1]), d[5]);
  q[2].val[0] = vmlal_s32(q[2].val[0], vget_low_s32(qIn1.val[0]), d[4]);
  q[2].val[1] = vmlal_s32(q[2].val[1], vget_high_s32(qIn1.val[0]), d[4]);
  q[3].val[0] = vmlal_s32(q[3].val[0], vget_low_s32(qIn1.val[1]), d[4]);
  q[3].val[1] = vmlal_s32(q[3].val[1], vget_high_s32(qIn1.val[1]), d[4]);

  qOut0->val[0] = vcombine_s32(vrshrn_n_s64(q[0].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[0].val[1], DCT_CONST_BITS));
  qOut0->val[1] = vcombine_s32(vrshrn_n_s64(q[1].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[1].val[1], DCT_CONST_BITS));
  qOut1->val[0] = vcombine_s32(vrshrn_n_s64(q[2].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[2].val[1], DCT_CONST_BITS));
  qOut1->val[1] = vcombine_s32(vrshrn_n_s64(q[3].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[3].val[1], DCT_CONST_BITS));
}

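// Load an 8x8 block of int32_t coefficients from |in|, which has a row stride
// of 32.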
static INLINE void load_s32x4q_dual(
    const int32_t *in, int32x4x2_t *const s0, int32x4x2_t *const s1,
    int32x4x2_t *const s2, int32x4x2_t *const s3, int32x4x2_t *const s4,
    int32x4x2_t *const s5, int32x4x2_t *const s6, int32x4x2_t *const s7) {
  s0->val[0] = vld1q_s32(in);
  s0->val[1] = vld1q_s32(in + 4);
  in += 32;
  s1->val[0] = vld1q_s32(in);
  s1->val[1] = vld1q_s32(in + 4);
  in += 32;
  s2->val[0] = vld1q_s32(in);
  s2->val[1] = vld1q_s32(in + 4);
  in += 32;
  s3->val[0] = vld1q_s32(in);
  s3->val[1] = vld1q_s32(in + 4);
  in += 32;
  s4->val[0] = vld1q_s32(in);
  s4->val[1] = vld1q_s32(in + 4);
  in += 32;
  s5->val[0] = vld1q_s32(in);
  s5->val[1] = vld1q_s32(in + 4);
  in += 32;
  s6->val[0] = vld1q_s32(in);
  s6->val[1] = vld1q_s32(in + 4);
  in += 32;
  s7->val[0] = vld1q_s32(in);
  s7->val[1] = vld1q_s32(in + 4);
}

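// Transpose an 8x8 block of int32_t values and store it contiguously,
// advancing *out by 64 entries.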
static INLINE void transpose_and_store_s32_8x8(int32x4x2_t a0, int32x4x2_t a1,
                                               int32x4x2_t a2, int32x4x2_t a3,
                                               int32x4x2_t a4, int32x4x2_t a5,
                                               int32x4x2_t a6, int32x4x2_t a7,
                                               int32_t **out) {
  transpose_s32_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  vst1q_s32(*out, a0.val[0]);
  *out += 4;
  vst1q_s32(*out, a0.val[1]);
  *out += 4;
  vst1q_s32(*out, a1.val[0]);
  *out += 4;
  vst1q_s32(*out, a1.val[1]);
  *out += 4;
  vst1q_s32(*out, a2.val[0]);
  *out += 4;
  vst1q_s32(*out, a2.val[1]);
  *out += 4;
  vst1q_s32(*out, a3.val[0]);
  *out += 4;
  vst1q_s32(*out, a3.val[1]);
  *out += 4;
  vst1q_s32(*out, a4.val[0]);
  *out += 4;
  vst1q_s32(*out, a4.val[1]);
  *out += 4;
  vst1q_s32(*out, a5.val[0]);
  *out += 4;
  vst1q_s32(*out, a5.val[1]);
  *out += 4;
  vst1q_s32(*out, a6.val[0]);
  *out += 4;
  vst1q_s32(*out, a6.val[1]);
  *out += 4;
  vst1q_s32(*out, a7.val[0]);
  *out += 4;
  vst1q_s32(*out, a7.val[1]);
  *out += 4;
}

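// Transpose an 8x32 strip of |input| (row stride 32) into |t_buf| as four
// consecutive transposed 8x8 blocks, giving 32 rows of 8 values.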
static INLINE void idct32_transpose_pair(const int32_t *input, int32_t *t_buf) {
  int i;
  int32x4x2_t s0, s1, s2, s3, s4, s5, s6, s7;

  for (i = 0; i < 4; i++, input += 8) {
    load_s32x4q_dual(input, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
    transpose_and_store_s32_8x8(s0, s1, s2, s3, s4, s5, s6, s7, &t_buf);
  }
}

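// Final additions of the first pass: combine q[] with the partial rows
// already stored in |out| and write the results for all 32 rows of the
// current band back to the intermediate buffer.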
static INLINE void idct32_bands_end_1st_pass(int32_t *const out,
                                             int32x4x2_t *const q) {
  store_in_output(out, 16, 17, q[6], q[7]);
  store_in_output(out, 14, 15, q[8], q[9]);

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 30, 31, q[6], q[7]);
  store_in_output(out, 0, 1, q[4], q[5]);

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[10], q[1]);
  q[3] = highbd_idct_add_dual(q[11], q[0]);
  q[4] = highbd_idct_sub_dual(q[11], q[0]);
  q[5] = highbd_idct_sub_dual(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  store_in_output(out, 18, 19, q[6], q[7]);
  store_in_output(out, 12, 13, q[8], q[9]);

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 28, 29, q[6], q[7]);
  store_in_output(out, 2, 3, q[4], q[5]);

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[12], q[1]);
  q[3] = highbd_idct_add_dual(q[13], q[0]);
  q[4] = highbd_idct_sub_dual(q[13], q[0]);
  q[5] = highbd_idct_sub_dual(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  store_in_output(out, 20, 21, q[6], q[7]);
  store_in_output(out, 10, 11, q[8], q[9]);

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 26, 27, q[6], q[7]);
  store_in_output(out, 4, 5, q[4], q[5]);

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[14], q[1]);
  q[3] = highbd_idct_add_dual(q[15], q[0]);
  q[4] = highbd_idct_sub_dual(q[15], q[0]);
  q[5] = highbd_idct_sub_dual(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  store_in_output(out, 22, 23, q[6], q[7]);
  store_in_output(out, 8, 9, q[8], q[9]);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 24, 25, q[6], q[7]);
  store_in_output(out, 6, 7, q[4], q[5]);
}

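// Final additions of the second pass: same combinations as the first pass,
// but the results are rounded, added to the destination pixels and clamped to
// [0, max] instead of being written back to the intermediate buffer.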
static INLINE void idct32_bands_end_2nd_pass(const int32_t *const out,
                                             uint16_t *const dest,
                                             const int stride,
                                             const int16x8_t max,
                                             int32x4x2_t *const q) {
  uint16_t *dest0 = dest + 0 * stride;
  uint16_t *dest1 = dest + 31 * stride;
  uint16_t *dest2 = dest + 16 * stride;
  uint16_t *dest3 = dest + 15 * stride;
  const int str2 = stride << 1;

  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[10], q[1]);
  q[3] = highbd_idct_add_dual(q[11], q[0]);
  q[4] = highbd_idct_sub_dual(q[11], q[0]);
  q[5] = highbd_idct_sub_dual(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[12], q[1]);
  q[3] = highbd_idct_add_dual(q[13], q[0]);
  q[4] = highbd_idct_sub_dual(q[13], q[0]);
  q[5] = highbd_idct_sub_dual(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[14], q[1]);
  q[3] = highbd_idct_add_dual(q[15], q[0]);
  q[4] = highbd_idct_sub_dual(q[15], q[0]);
  q[5] = highbd_idct_sub_dual(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
}

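// Two-pass high bitdepth 32x32 inverse transform. Each pass transposes 8x32
// strips of its input and runs the 32-point IDCT on four 8-wide bands; the
// first pass writes the intermediate result to pass1[], the second pass adds
// the final, rounded result to |dst|.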
static INLINE void vpx_highbd_idct32_32_neon(const tran_low_t *input,
                                             uint16_t *dst, const int stride,
                                             const int bd) {
  int i, idct32_pass_loop;
  int32_t trans_buf[32 * 8];
  int32_t pass1[32 * 32];
  int32_t pass2[32 * 32];
  int32_t *out;
  int32x4x2_t q[16];

  for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
       idct32_pass_loop++, input = pass1, out = pass2) {
    for (i = 0; i < 4; i++, out += 8) {  // idct32_bands_loop
      idct32_transpose_pair(input, trans_buf);
      input += 32 * 8;

      // -----------------------------------------
      // BLOCK A: 16-19,28-31
      // -----------------------------------------
      // generate 16,17,30,31
      // part of stage 1
      load_from_transformed(trans_buf, 1, 31, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_31_64, cospi_1_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 17, 15, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_15_64, cospi_17_64, &q[1], &q[3]);
      // part of stage 2
      q[4] = highbd_idct_add_dual(q[0], q[1]);
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[6] = highbd_idct_add_dual(q[2], q[3]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      // part of stage 3
      do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[5], &q[7]);

      // generate 18,19,28,29
      // part of stage 1
      load_from_transformed(trans_buf, 9, 23, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_23_64, cospi_9_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 25, 7, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_7_64, cospi_25_64, &q[1], &q[3]);
      // part of stage 2
      q[13] = highbd_idct_sub_dual(q[3], q[2]);
      q[3] = highbd_idct_add_dual(q[3], q[2]);
      q[14] = highbd_idct_sub_dual(q[1], q[0]);
      q[2] = highbd_idct_add_dual(q[1], q[0]);
      // part of stage 3
      do_butterfly(q[14], q[13], -cospi_4_64, -cospi_28_64, &q[1], &q[0]);
      // part of stage 4
      q[8] = highbd_idct_add_dual(q[4], q[2]);
      q[9] = highbd_idct_add_dual(q[5], q[0]);
      q[10] = highbd_idct_add_dual(q[7], q[1]);
      q[15] = highbd_idct_add_dual(q[6], q[3]);
      q[13] = highbd_idct_sub_dual(q[5], q[0]);
      q[14] = highbd_idct_sub_dual(q[7], q[1]);
      store_in_output(out, 16, 31, q[8], q[15]);
      store_in_output(out, 17, 30, q[9], q[10]);
      // part of stage 5
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[0], &q[1]);
      store_in_output(out, 29, 18, q[1], q[0]);
      // part of stage 4
      q[13] = highbd_idct_sub_dual(q[4], q[2]);
      q[14] = highbd_idct_sub_dual(q[6], q[3]);
      // part of stage 5
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[4], &q[6]);
      store_in_output(out, 19, 28, q[4], q[6]);

      // -----------------------------------------
      // BLOCK B: 20-23,24-27
      // -----------------------------------------
      // generate 20,21,26,27
      // part of stage 1
      load_from_transformed(trans_buf, 5, 27, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_27_64, cospi_5_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 21, 11, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_11_64, cospi_21_64, &q[1], &q[3]);
      // part of stage 2
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[0] = highbd_idct_add_dual(q[0], q[1]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      q[2] = highbd_idct_add_dual(q[2], q[3]);
      // part of stage 3
      do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);

      // generate 22,23,24,25
      // part of stage 1
      load_from_transformed(trans_buf, 13, 19, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_19_64, cospi_13_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 29, 3, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_3_64, cospi_29_64, &q[4], &q[6]);
      // part of stage 2
      q[14] = highbd_idct_sub_dual(q[4], q[5]);
      q[5] = highbd_idct_add_dual(q[4], q[5]);
      q[13] = highbd_idct_sub_dual(q[6], q[7]);
      q[6] = highbd_idct_add_dual(q[6], q[7]);
      // part of stage 3
      do_butterfly(q[14], q[13], -cospi_20_64, -cospi_12_64, &q[4], &q[7]);
      // part of stage 4
      q[10] = highbd_idct_add_dual(q[7], q[1]);
      q[11] = highbd_idct_add_dual(q[5], q[0]);
      q[12] = highbd_idct_add_dual(q[6], q[2]);
      q[15] = highbd_idct_add_dual(q[4], q[3]);
      // part of stage 6
      load_from_output(out, 16, 17, &q[14], &q[13]);
      q[8] = highbd_idct_add_dual(q[14], q[11]);
      q[9] = highbd_idct_add_dual(q[13], q[10]);
      q[13] = highbd_idct_sub_dual(q[13], q[10]);
      q[11] = highbd_idct_sub_dual(q[14], q[11]);
      store_in_output(out, 17, 16, q[9], q[8]);
      load_from_output(out, 30, 31, &q[14], &q[9]);
      q[8] = highbd_idct_sub_dual(q[9], q[12]);
      q[10] = highbd_idct_add_dual(q[14], q[15]);
      q[14] = highbd_idct_sub_dual(q[14], q[15]);
      q[12] = highbd_idct_add_dual(q[9], q[12]);
      store_in_output(out, 30, 31, q[10], q[12]);
      // part of stage 7
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 25, 22, q[14], q[13]);
      do_butterfly(q[8], q[11], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 24, 23, q[14], q[13]);
      // part of stage 4
      q[14] = highbd_idct_sub_dual(q[5], q[0]);
      q[13] = highbd_idct_sub_dual(q[6], q[2]);
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[5], &q[6]);
      q[14] = highbd_idct_sub_dual(q[7], q[1]);
      q[13] = highbd_idct_sub_dual(q[4], q[3]);
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[0], &q[1]);
      // part of stage 6
      load_from_output(out, 18, 19, &q[14], &q[13]);
      q[8] = highbd_idct_add_dual(q[14], q[1]);
      q[9] = highbd_idct_add_dual(q[13], q[6]);
      q[13] = highbd_idct_sub_dual(q[13], q[6]);
      q[1] = highbd_idct_sub_dual(q[14], q[1]);
      store_in_output(out, 18, 19, q[8], q[9]);
      load_from_output(out, 28, 29, &q[8], &q[9]);
      q[14] = highbd_idct_sub_dual(q[8], q[5]);
      q[10] = highbd_idct_add_dual(q[8], q[5]);
      q[11] = highbd_idct_add_dual(q[9], q[0]);
      q[0] = highbd_idct_sub_dual(q[9], q[0]);
      store_in_output(out, 28, 29, q[10], q[11]);
      // part of stage 7
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 20, 27, q[13], q[14]);
      do_butterfly(q[0], q[1], cospi_16_64, cospi_16_64, &q[1], &q[0]);
      store_in_output(out, 21, 26, q[1], q[0]);

      // -----------------------------------------
      // BLOCK C: 8-11,12-15
      // -----------------------------------------
      // generate 8,9,14,15
      // part of stage 2
      load_from_transformed(trans_buf, 2, 30, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_30_64, cospi_2_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 18, 14, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_14_64, cospi_18_64, &q[1], &q[3]);
      // part of stage 3
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[0] = highbd_idct_add_dual(q[0], q[1]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      q[2] = highbd_idct_add_dual(q[2], q[3]);
      // part of stage 4
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[1], &q[3]);

      // generate 10,11,12,13
      // part of stage 2
      load_from_transformed(trans_buf, 10, 22, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_22_64, cospi_10_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 26, 6, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_6_64, cospi_26_64, &q[4], &q[6]);
      // part of stage 3
      q[14] = highbd_idct_sub_dual(q[4], q[5]);
      q[5] = highbd_idct_add_dual(q[4], q[5]);
      q[13] = highbd_idct_sub_dual(q[6], q[7]);
      q[6] = highbd_idct_add_dual(q[6], q[7]);
      // part of stage 4
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[4], &q[7]);
      // part of stage 5
      q[8] = highbd_idct_add_dual(q[0], q[5]);
      q[9] = highbd_idct_add_dual(q[1], q[7]);
      q[13] = highbd_idct_sub_dual(q[1], q[7]);
      q[14] = highbd_idct_sub_dual(q[3], q[4]);
      q[10] = highbd_idct_add_dual(q[3], q[4]);
      q[15] = highbd_idct_add_dual(q[2], q[6]);
      store_in_output(out, 8, 15, q[8], q[15]);
      store_in_output(out, 9, 14, q[9], q[10]);
      // part of stage 6
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
      store_in_output(out, 13, 10, q[3], q[1]);
      q[13] = highbd_idct_sub_dual(q[0], q[5]);
      q[14] = highbd_idct_sub_dual(q[2], q[6]);
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
      store_in_output(out, 11, 12, q[1], q[3]);

      // -----------------------------------------
      // BLOCK D: 0-3,4-7
      // -----------------------------------------
      // generate 4,5,6,7
      // part of stage 3
      load_from_transformed(trans_buf, 4, 28, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 20, 12, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);
      // part of stage 4
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[0] = highbd_idct_add_dual(q[0], q[1]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      q[2] = highbd_idct_add_dual(q[2], q[3]);
      // part of stage 5
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);

      // generate 0,1,2,3
      // part of stage 4
      load_from_transformed(trans_buf, 0, 16, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 8, 24, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[14], &q[6]);
      // part of stage 5
      q[4] = highbd_idct_add_dual(q[7], q[6]);
      q[7] = highbd_idct_sub_dual(q[7], q[6]);
      q[6] = highbd_idct_sub_dual(q[5], q[14]);
      q[5] = highbd_idct_add_dual(q[5], q[14]);
      // part of stage 6
      q[8] = highbd_idct_add_dual(q[4], q[2]);
      q[9] = highbd_idct_add_dual(q[5], q[3]);
      q[10] = highbd_idct_add_dual(q[6], q[1]);
      q[11] = highbd_idct_add_dual(q[7], q[0]);
      q[12] = highbd_idct_sub_dual(q[7], q[0]);
      q[13] = highbd_idct_sub_dual(q[6], q[1]);
      q[14] = highbd_idct_sub_dual(q[5], q[3]);
      q[15] = highbd_idct_sub_dual(q[4], q[2]);
      // part of stage 7
      load_from_output(out, 14, 15, &q[0], &q[1]);
      q[2] = highbd_idct_add_dual(q[8], q[1]);
      q[3] = highbd_idct_add_dual(q[9], q[0]);
      q[4] = highbd_idct_sub_dual(q[9], q[0]);
      q[5] = highbd_idct_sub_dual(q[8], q[1]);
      load_from_output(out, 16, 17, &q[0], &q[1]);
      q[8] = highbd_idct_add_dual(q[4], q[1]);
      q[9] = highbd_idct_add_dual(q[5], q[0]);
      q[6] = highbd_idct_sub_dual(q[5], q[0]);
      q[7] = highbd_idct_sub_dual(q[4], q[1]);

      if (idct32_pass_loop == 0) {
        idct32_bands_end_1st_pass(out, q);
      } else {
        const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
        idct32_bands_end_2nd_pass(out, dst, stride, max, q);
        dst += 8;
      }
    }
  }
}

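// When bd is 8 the intermediate values fit the regular 16-bit idct32 path, so
// that implementation is reused; the final argument flags the 16-bit
// destination buffer.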
void vpx_highbd_idct32x32_1024_add_neon(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd) {
  if (bd == 8) {
    vpx_idct32_32_neon(input, CAST_TO_BYTEPTR(dest), stride, 1);
  } else {
    vpx_highbd_idct32_32_neon(input, dest, stride, bd);
  }
}