///*****************************************************************************
//*
//* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at:
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
//*
//*****************************************************************************/
///**
//*******************************************************************************
//* @file
//*  ihevc_intra_pred_luma_mode_19_to_25.s
//*
//* @brief
//*  contains function definitions for luma intra prediction for the angular
//* modes 19 to 25. functions are coded in neon assembly and can be assembled
//* with gnu-syntax toolchains (gas/clang).
//*
//* @author
//*  naveen sr
//*
//* @par list of functions:
//*  - ihevc_intra_pred_luma_mode_19_to_25_av8()
//*
//* @remarks
//*  none
//*
//*******************************************************************************
//*/
///**
//*******************************************************************************
//*
//* @brief
//*  luma intra prediction interpolation filter for angular modes 19 to 25
//*
//* @par description:
//*  builds an extended main reference array from the top neighbours (plus the
//* left neighbours projected via the inverse angle), then linearly
//* interpolates along the prediction angle
//*
//* @param[in] pu1_ref
//*  uword8 pointer to the source
//*
//* @param[in] src_strd
//*  integer source stride
//*
//* @param[out] pu1_dst
//*  uword8 pointer to the destination
//*
//* @param[in] dst_strd
//*  integer destination stride
//*
//* @param[in] nt
//*  size of transform block
//*
//* @param[in] mode
//*  intra prediction mode (19 to 25)
//*
//* @returns
//*  none
//*
//* @remarks
//*  none
//*
//*******************************************************************************
//*/

//void ihevc_intra_pred_luma_mode_19_to_25(uword8* pu1_ref,
//                               word32 src_strd,
//                               uword8* pu1_dst,
//                               word32 dst_strd,
//                               word32 nt,
//                               word32 mode)
//
//**************variables vs registers*****************************************
//x0 => *pu1_ref
//x1 => src_strd
//x2 => *pu1_dst
//x3 => dst_strd
//x4 => nt
//x5 => mode
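
//**************c reference sketch*********************************************
//a minimal c sketch of what this kernel computes, reconstructed from the
//instruction comments below (not the library's exact source; UWORD8/WORD32
//as in ihevc_typedefs.h, MAX_CU_SIZE assumed):
//
//    WORD32 intra_pred_ang = gai4_ihevc_ang_table[mode];
//    WORD32 inv_ang        = gai4_ihevc_inv_ang_table[mode - 12];
//    UWORD8 ref_temp[2 * MAX_CU_SIZE + 1];
//    UWORD8 *ref_main = ref_temp + nt - 1;
//    WORD32 two_nt = 2 * nt, k, row, col;
//
//    /* copy the top reference samples */
//    for(k = 0; k < nt + 1; k++)
//        ref_temp[k + nt - 1] = pu1_ref[two_nt + k];
//
//    /* project the left neighbours onto the (negative) main side */
//    WORD32 ref_idx = (nt * intra_pred_ang) >> 5;
//    WORD32 inv_ang_sum = 128;
//    for(k = -1; k > ref_idx; k--)
//    {
//        inv_ang_sum += inv_ang;
//        ref_main[k] = pu1_ref[two_nt - (inv_ang_sum >> 8)];
//    }
//
//    /* 1/32-pel linear interpolation along the prediction angle */
//    for(row = 0; row < nt; row++)
//    {
//        WORD32 pos   = (row + 1) * intra_pred_ang;
//        WORD32 idx   = pos >> 5;
//        WORD32 fract = pos & 31;
//        for(col = 0; col < nt; col++)
//            pu1_dst[row * dst_strd + col] =
//                (UWORD8)(((32 - fract) * ref_main[col + idx + 1]
//                          + fract * ref_main[col + idx + 2] + 16) >> 5);
//    }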

.text
.align 4
.include "ihevc_neon_macros.s"



.globl ihevc_intra_pred_luma_mode_19_to_25_av8
.extern gai4_ihevc_ang_table
.extern gai4_ihevc_inv_ang_table
.extern gau1_ihevc_planar_factor

.type ihevc_intra_pred_luma_mode_19_to_25_av8, %function

ihevc_intra_pred_luma_mode_19_to_25_av8:

    // stmfd sp!, {x4-x12, x14}            //stack stores the values of the arguments

    stp         d9,d10,[sp,#-16]!
    stp         d12,d13,[sp,#-16]!
    stp         d14,d15,[sp,#-16]!
    stp         x19, x20,[sp,#-16]!

    adrp        x7,  :got:gai4_ihevc_ang_table
    ldr         x7,  [x7, #:got_lo12:gai4_ihevc_ang_table]

    adrp        x8,  :got:gai4_ihevc_inv_ang_table
    ldr         x8,  [x8, #:got_lo12:gai4_ihevc_inv_ang_table]

    add         x7, x7, x5, lsl #2          //gai4_ihevc_ang_table[mode]
    add         x8, x8, x5, lsl #2          //gai4_ihevc_inv_ang_table
    sub         x8, x8, #48                 //gai4_ihevc_inv_ang_table[mode - 12]

    ldr         w7,  [x7]                   //intra_pred_ang
    sxtw        x7,w7
    sub         sp, sp, #132                //ref_temp[2 * max_cu_size + 1]

    ldr         w8,  [x8]                   //inv_ang
    sxtw        x8,w8
    add         x6, sp, x4                  //ref_temp + nt

    mul         x9, x4, x7                  //nt*intra_pred_ang

    sub         x6, x6, #1                  //ref_temp + nt - 1

    add         x1, x0, x4, lsl #1          //x1 = &src[2nt]
    dup         v30.8b,w7                   //intra_pred_ang

    mov         x7, x4

    asr         x9, x9, #5                  //ref_idx = (nt * intra_pred_ang) >> 5

    ld1         {v0.s}[0],[x1],#4           // pu1_ref[two_nt + k]

    st1         {v0.s}[0],[x6],#4           //ref_temp[k + nt - 1] = pu1_ref[two_nt + k]//

    subs        x7, x7, #4
    beq         end_loop_copy
    sub         x1, x1,#4
    sub         x6, x6,#4
    subs        x7,x7,#4
    beq         loop_copy_8
    subs        x7,x7,#8
    beq         loop_copy_16

loop_copy_32:
    ld1         {v0.8b},[x1],#8
    ld1         {v1.8b},[x1],#8
    ld1         {v2.8b},[x1],#8
    ld1         {v3.8b},[x1],#8

    st1         {v0.8b},[x6],#8
    st1         {v1.8b},[x6],#8
    st1         {v2.8b},[x6],#8
    st1         {v3.8b},[x6],#8
    b           end_loop_copy

loop_copy_16:
    ld1         {v0.8b},[x1],#8
    ld1         {v1.8b},[x1],#8

    st1         {v0.8b},[x6],#8
    st1         {v1.8b},[x6],#8
    b           end_loop_copy

loop_copy_8:
    ld1         {v0.8b},[x1],#8
    st1         {v0.8b},[x6],#8

end_loop_copy:

    ldrb        w11, [x1]
    sxtw        x11,w11
    strb        w11, [x6]
    sxtw        x11,w11

    cmp         x9, #-1
    bge         linear_filtering

    add         x6, sp, x4                  //ref_temp + nt
    sub         x6, x6, #2                  //ref_temp + nt - 2

    mov         x12, #-1

    sub         x20, x9, x12                //count to take care of ref_idx
    neg         x9, x20

    add         x1, x0, x4, lsl #1          //x1 = &src[2nt]

    mov         x7, #128                    //inv_ang_sum

loop_copy_ref_idx:

    add         x7, x7, x8                  //inv_ang_sum += inv_ang
    lsr         x14, x7, #8
    neg         x20,x14
    ldrb        w11, [x1, x20]
    sxtw        x11,w11
//    ldrb        x11, [x1, -x7, lsr #8]
    strb        w11, [x6], #-1
    sxtw        x11,w11

    subs        x9, x9, #1

    bne         loop_copy_ref_idx
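
//    scalar equivalent of the projection loop above (a reconstructed sketch):
//    x7 holds inv_ang_sum, x8 holds inv_ang, x1 points to pu1_ref[2*nt] and
//    x6 walks ref_temp downwards:
//
//        inv_ang_sum += inv_ang;
//        *pu1_ref_temp-- = pu1_ref[2 * nt - (inv_ang_sum >> 8)];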


linear_filtering:
//    after the reference copy, the code below is adapted from the
//    mode 27 to 33 kernel and modified for the negative-angle reference layout
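//
//    per 8x8 batch the neon code evaluates, one byte lane per row (a
//    reconstructed mapping, matching the instruction comments below):
//
//        pos   = (row + 1) * intra_pred_ang      // smull v2.8h, v3.8b, v0.8b
//        fract = pos & 31                        // and   v4.16b, v2.16b, v6.16b
//        idx   = pos >> 5                        // shrn  v5.8b, v2.8h, #5
//        dst   = ((32 - fract) * ref_main[idx + 1]
//                 + fract * ref_main[idx + 2] + 16) >> 5
//                                                // umull/umlal, then rshrn #5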

    adrp        x6,  :got:gai4_ihevc_ang_table //loads word32 gai4_ihevc_ang_table[35]
    ldr         x6,  [x6, #:got_lo12:gai4_ihevc_ang_table]

    add         x8,x6,x5,lsl #2             //*gai4_ihevc_ang_table[mode]
    ldr         w9, [x8]                    //intra_pred_ang = gai4_ihevc_ang_table[mode]
    sxtw        x9,w9
    adrp        x1, :got:gau1_ihevc_planar_factor //used for ((row + 1) * intra_pred_ang) row values
    ldr         x1, [x1, #:got_lo12:gau1_ihevc_planar_factor]
    add         x6,x1,#1

    add         x8, sp, x4                  //ref_temp + nt
    sub         x8, x8,#1                   //ref_temp + nt - 1

    tst         x4,#7
    mov         x14,#0                      //row
    mov         x12,x4
    bne         core_loop_4

core_loop_8:
    add         x8,x8,#1                    //pu1_ref_main_idx += 1 (x8 = ref_temp + nt)
    dup         v0.8b,w9                    //intra_pred_ang
    lsr         x12, x4, #3                 //divide by 8

    movi        v1.8b, #32
    mul         x7, x4, x12

    movi        v6.8h, #31
    //lsl            x12,x3,#3

    mov         x1,x8
    //sub            x12,x12,x4
    mov         x5,x4
    mov         x11,#1

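//register/constant roles in the 8x8 kernel, as set up above: v0 = ang,
//v1 = 32, v6 = 31, x8 = ref_temp + nt (main reference base), x1 = saved copy
//of x8, x6 = row factors from gau1_ihevc_planar_factor + 1, x11 = 1
//(post-increment step), x5 = nt, x7 = 8 * number of 8x8 blocks still to do
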
prologue:
    ld1         {v3.8b},[x6]                //loads the row value
    smull       v2.8h, v3.8b, v0.8b         //pos = ((row + 1) * intra_pred_ang)
    and         v4.16b,  v2.16b ,  v6.16b   //dup_const_fract(fract = pos & (31))
    xtn         v4.8b,  v4.8h
    shrn        v5.8b, v2.8h,#5             //idx = pos >> 5

    dup         v31.8b, v4.8b[0]
    add         x0,x2,x3

    umov        w14, v5.2s[0]               //(i row)extract idx to the r register
    sxtw        x14,w14

    dup         v29.8b, v4.8b[1]            //(ii)
    sbfx        x9,x14,#0,#8

    add         x10,x8,x9                   //(i row)*pu1_ref[ref_main_idx]

    ld1         {v23.8b},[x10],x11          //(i row)ref_main_idx
    sbfx        x9,x14,#8,#8

    ld1         {v9.8b},[x10]               //(i row)ref_main_idx_1
    add         x12,x8,x9                   //(ii)*pu1_ref[ref_main_idx]

    sbfx        x9,x14,#16,#8
    sub         v30.8b,  v1.8b ,  v31.8b    //32-fract(dup_const_32_fract)
    add         x10,x8,x9                   //(iii)*pu1_ref[ref_main_idx]

    ld1         {v12.8b},[x12],x11          //(ii)ref_main_idx
    umull       v10.8h, v23.8b, v30.8b      //(i row)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(ii)ref_main_idx_1
    umlal       v10.8h, v9.8b, v31.8b       //(i row)vmull_u8(ref_main_idx_1, dup_const_fract)

    dup         v27.8b, v4.8b[2]            //(iii)
    sub         v28.8b,  v1.8b ,  v29.8b    //(ii)32-fract(dup_const_32_fract)
    sbfx        x9,x14,#24,#8

    dup         v25.8b, v4.8b[3]            //(iv)
    umull       v14.8h, v12.8b, v28.8b      //(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
    add         x12,x8,x9                   //(iv)*pu1_ref[ref_main_idx]

    ld1         {v16.8b},[x10],x11          //(iii)ref_main_idx
    umlal       v14.8h, v13.8b, v29.8b      //(ii)vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v17.8b},[x10]              //(iii)ref_main_idx_1
    rshrn       v10.8b, v10.8h,#5           //(i row)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v20.8b},[x12],x11          //(iv)ref_main_idx
    sub         v26.8b,  v1.8b ,  v27.8b    //(iii)32-fract(dup_const_32_fract)

    ld1         {v21.8b},[x12]              //(iv)ref_main_idx_1

    dup         v31.8b, v4.8b[4]            //(v)
    umull       v18.8h, v16.8b, v26.8b      //(iii)vmull_u8(ref_main_idx, dup_const_32_fract)

    umov        w14, v5.2s[1]               //extract idx to the r register
    sxtw        x14,w14
    umlal       v18.8h, v17.8b, v27.8b      //(iii)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v10.8b},[x2],#8            //(i row)
    rshrn       v14.8b, v14.8h,#5           //(ii)shift_res = vrshrn_n_u16(add_res, 5)

    sbfx        x9,x14,#0,#8
    dup         v29.8b, v4.8b[5]            //(vi)
    add         x10,x8,x9                   //(v)*pu1_ref[ref_main_idx]

    ld1         {v23.8b},[x10],x11          //(v)ref_main_idx
    sub         v24.8b,  v1.8b ,  v25.8b    //(iv)32-fract(dup_const_32_fract)

    umull       v22.8h, v20.8b, v24.8b      //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
    sbfx        x9,x14,#8,#8

    ld1         {v9.8b},[x10]               //(v)ref_main_idx_1
    umlal       v22.8h, v21.8b, v25.8b      //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v14.8b},[x0],x3            //(ii)
    rshrn       v18.8b, v18.8h,#5           //(iii)shift_res = vrshrn_n_u16(add_res, 5)

    add         x12,x8,x9                   //(vi)*pu1_ref[ref_main_idx]
    dup         v27.8b, v4.8b[6]            //(vii)

    sbfx        x9,x14,#16,#8
    sub         v30.8b,  v1.8b ,  v31.8b    //(v)32-fract(dup_const_32_fract)
    add         x10,x8,x9                   //(vii)*pu1_ref[ref_main_idx]

    ld1         {v12.8b},[x12],x11          //(vi)ref_main_idx
    umull       v10.8h, v23.8b, v30.8b      //(v)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(vi)ref_main_idx_1
    umlal       v10.8h, v9.8b, v31.8b       //(v)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v18.8b},[x0],x3            //(iii)
    rshrn       v22.8b, v22.8h,#5           //(iv)shift_res = vrshrn_n_u16(add_res, 5)

    dup         v25.8b, v4.8b[7]            //(viii)
    sbfx        x9,x14,#24,#8

    ld1         {v16.8b},[x10],x11          //(vii)ref_main_idx
    sub         v28.8b,  v1.8b ,  v29.8b    //(vi)32-fract(dup_const_32_fract)

    ld1         {v17.8b},[x10]              //(vii)ref_main_idx_1
    umull       v14.8h, v12.8b, v28.8b      //(vi)vmull_u8(ref_main_idx, dup_const_32_fract)

    add         x12,x8,x9                   //(viii)*pu1_ref[ref_main_idx]
    umlal       v14.8h, v13.8b, v29.8b      //(vi)vmull_u8(ref_main_idx_1, dup_const_fract)
    subs        x4,x4,#8

    st1         {v22.8b},[x0],x3            //(iv)
    rshrn       v10.8b, v10.8h,#5           //(v)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v20.8b},[x12],x11          //(viii)ref_main_idx
    sub         v26.8b,  v1.8b ,  v27.8b    //(vii)32-fract(dup_const_32_fract)

    ld1         {v21.8b},[x12]              //(viii)ref_main_idx_1
    umull       v18.8h, v16.8b, v26.8b      //(vii)vmull_u8(ref_main_idx, dup_const_32_fract)

    add         x20,x8,#8
    csel        x8, x20, x8,gt
    umlal       v18.8h, v17.8b, v27.8b      //(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
    sub         x20,x7,#8
    csel        x7, x20, x7,gt

    st1         {v10.8b},[x0],x3            //(v)
    rshrn       v14.8b, v14.8h,#5           //(vi)shift_res = vrshrn_n_u16(add_res, 5)

    beq         epilogue

    ld1         {v5.8b},[x6]                //loads the row value
    smull       v2.8h, v5.8b, v0.8b         //pos = ((row + 1) * intra_pred_ang)
    and         v4.16b,  v2.16b ,  v6.16b   //dup_const_fract(fract = pos & (31))
    xtn         v4.8b,  v4.8h
    shrn        v3.8b, v2.8h,#5             //idx = pos >> 5
    umov        w14, v3.2s[0]               //(i)extract idx to the r register
    sxtw        x14,w14
    sbfx        x9,x14,#0,#8
    add         x10,x8,x9                   //(i)*pu1_ref[ref_main_idx]

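//kernel_8_rows is software pipelined: each pass finishes and stores the tail
//of the previous 8x8 batch ((vi)-(viii)) while already issuing the loads and
//umull/umlal accumulations for the next batch, so memory latency overlaps
//the multiplies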
kernel_8_rows:
    dup         v31.8b, v4.8b[0]
    subs        x4,x4,#8
    sbfx        x9,x14,#8,#8

    ld1         {v23.8b},[x10],x11          //(i)ref_main_idx
    sub         v24.8b,  v1.8b ,  v25.8b    //(viii)32-fract(dup_const_32_fract)

    add         x20,x6,#8                   //increment the row value
    csel        x6, x20, x6,le
    add         x12,x8,x9                   //(ii)*pu1_ref[ref_main_idx]

    ld1         {v9.8b},[x10]               //(i)ref_main_idx_1
    umull       v22.8h, v20.8b, v24.8b      //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v5.8b},[x6]                //loads the row value
    umlal       v22.8h, v21.8b, v25.8b      //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)

    dup         v29.8b, v4.8b[1]            //(ii)
    rshrn       v18.8b, v18.8h,#5           //(vii)shift_res = vrshrn_n_u16(add_res, 5)

    sbfx        x9,x14,#16,#8

    st1         {v14.8b},[x0],x3            //(vi)
    sub         v30.8b,  v1.8b ,  v31.8b    //(i)32-fract(dup_const_32_fract)

    add         x10,x8,x9                   //(iii)*pu1_ref[ref_main_idx]

    ld1         {v12.8b},[x12],x11          //(ii)ref_main_idx
    umull       v10.8h, v23.8b, v30.8b      //(i)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(ii)ref_main_idx_1
    umlal       v10.8h, v9.8b, v31.8b       //(i)vmull_u8(ref_main_idx_1, dup_const_fract)

    sbfx        x9,x14,#24,#8
    csel        x4, x5, x4,le               //reload nt

    umov        w14, v3.2s[1]               //extract idx to the r register
    sxtw        x14,w14
    rshrn       v22.8b, v22.8h,#5           //(viii)shift_res = vrshrn_n_u16(add_res, 5)

    dup         v27.8b, v4.8b[2]            //(iii)
    sub         v28.8b,  v1.8b ,  v29.8b    //(ii)32-fract(dup_const_32_fract)
    add         x12,x8,x9                   //(iv)*pu1_ref[ref_main_idx]

    ld1         {v16.8b},[x10],x11          //(iii)ref_main_idx
    umull       v14.8h, v12.8b, v28.8b      //(ii)vmull_u8(ref_main_idx, dup_const_32_fract)

    st1         {v18.8b},[x0],x3            //(vii)
    umlal       v14.8h, v13.8b, v29.8b      //(ii)vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v17.8b},[x10]              //(iii)ref_main_idx_1
    rshrn       v10.8b, v10.8h,#5           //(i)shift_res = vrshrn_n_u16(add_res, 5)

    dup         v25.8b, v4.8b[3]            //(iv)
    smull       v2.8h, v5.8b, v0.8b         //pos = ((row + 1) * intra_pred_ang)

    st1         {v22.8b},[x0]               //(viii)
    sub         v26.8b,  v1.8b ,  v27.8b    //(iii)32-fract(dup_const_32_fract)

    ld1         {v20.8b},[x12],x11          //(iv)ref_main_idx
    umull       v18.8h, v16.8b, v26.8b      //(iii)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v21.8b},[x12]              //(iv)ref_main_idx_1
    umlal       v18.8h, v17.8b, v27.8b      //(iii)vmull_u8(ref_main_idx_1, dup_const_fract)

    sbfx        x9,x14,#0,#8
    add         x0,x2,x3

    dup         v31.8b, v4.8b[4]            //(v)
    rshrn       v14.8b, v14.8h,#5           //(ii)shift_res = vrshrn_n_u16(add_res, 5)

    add         x10,x8,x9                   //(v)*pu1_ref[ref_main_idx]
    sbfx        x9,x14,#8,#8

    st1         {v10.8b},[x2],#8            //(i)
    sub         v24.8b,  v1.8b ,  v25.8b    //(iv)32-fract(dup_const_32_fract)

    dup         v29.8b, v4.8b[5]            //(vi)
    umull       v22.8h, v20.8b, v24.8b      //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)

    dup         v27.8b, v4.8b[6]            //(vii)
    umlal       v22.8h, v21.8b, v25.8b      //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)

    add         x12,x8,x9                   //(vi)*pu1_ref[ref_main_idx]
    sbfx        x9,x14,#16,#8

    dup         v25.8b, v4.8b[7]            //(viii)
    rshrn       v18.8b, v18.8h,#5           //(iii)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v23.8b},[x10],x11          //(v)ref_main_idx
    and         v4.16b,  v2.16b ,  v6.16b   //dup_const_fract(fract = pos & (31))

    ld1         {v9.8b},[x10]               //(v)ref_main_idx_1
    shrn        v3.8b, v2.8h,#5             //idx = pos >> 5

    st1         {v14.8b},[x0],x3            //(ii)
    rshrn       v22.8b, v22.8h,#5           //(iv)shift_res = vrshrn_n_u16(add_res, 5)

    add         x10,x8,x9                   //(vii)*pu1_ref[ref_main_idx]
    sbfx        x9,x14,#24,#8

    ld1         {v12.8b},[x12],x11          //(vi)ref_main_idx
    sub         v30.8b,  v1.8b ,  v31.8b    //(v)32-fract(dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(vi)ref_main_idx_1
    umull       v10.8h, v23.8b, v30.8b      //(v)vmull_u8(ref_main_idx, dup_const_32_fract)

    umov        w14, v3.2s[0]               //(i)extract idx to the r register
    sxtw        x14,w14
    umlal       v10.8h, v9.8b, v31.8b       //(v)vmull_u8(ref_main_idx_1, dup_const_fract)

    add         x12,x8,x9                   //(viii)*pu1_ref[ref_main_idx]
    csel        x8, x1, x8,le               //reload ref_main base (ref_temp + nt)

    ld1         {v16.8b},[x10],x11          //(vii)ref_main_idx
    sub         v28.8b,  v1.8b ,  v29.8b    //(vi)32-fract(dup_const_32_fract)

    st1         {v18.8b},[x0],x3            //(iii)
    umull       v14.8h, v12.8b, v28.8b      //(vi)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v17.8b},[x10]              //(vii)ref_main_idx_1
    umlal       v14.8h, v13.8b, v29.8b      //(vi)vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v20.8b},[x12],x11          //(viii)ref_main_idx
    rshrn       v10.8b, v10.8h,#5           //(v)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v21.8b},[x12]              //(viii)ref_main_idx_1
    sub         v26.8b,  v1.8b ,  v27.8b    //(vii)32-fract(dup_const_32_fract)

    add         x20,x8,#8                   //advance the source to the next set of 8 columns in the same row
    csel        x8, x20, x8,gt
    lsl         x20, x3,#3                  //8 * dst_strd
    csel        x12,x20,x12,le
    sub         x20,x12,x5                  //8 * dst_strd - nt
    csel        x12, x20, x12,le

    st1         {v22.8b},[x0],x3            //(iv)
    umull       v18.8h, v16.8b, v26.8b      //(vii)vmull_u8(ref_main_idx, dup_const_32_fract)

    st1         {v10.8b},[x0],x3            //(v)
    umlal       v18.8h, v17.8b, v27.8b      //(vii)vmull_u8(ref_main_idx_1, dup_const_fract)

    add         x20,x2,x12                  //increment the dst pointer to 8*dst_strd - nt
    csel        x2, x20, x2,le
    sbfx        x9,x14,#0,#8

    xtn         v4.8b,  v4.8h
    rshrn       v14.8b, v14.8h,#5           //(vi)shift_res = vrshrn_n_u16(add_res, 5)

    subs        x7,x7,#8
    add         x10,x8,x9                   //(i)*pu1_ref[ref_main_idx]

    bne         kernel_8_rows

epilogue:
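    //drains the software pipeline: batches (vi)-(viii) started in the last
    //kernel pass are finished and stored here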
    st1         {v14.8b},[x0],x3            //(vi)
    rshrn       v18.8b, v18.8h,#5           //(vii)shift_res = vrshrn_n_u16(add_res, 5)

    sub         v24.8b,  v1.8b ,  v25.8b    //(viii)32-fract(dup_const_32_fract)
    umull       v22.8h, v20.8b, v24.8b      //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
    umlal       v22.8h, v21.8b, v25.8b      //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v18.8b},[x0],x3            //(vii)
    rshrn       v22.8b, v22.8h,#5           //(viii)shift_res = vrshrn_n_u16(add_res, 5)

    st1         {v22.8b},[x0],x3            //(viii)
    b           end_loops

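//the nt == 4 path below computes one 4-pixel row per step with the index
//arithmetic in scalar registers (x5 = fract, x14 = idx); a reconstructed
//c equivalent:
//
//    for(row = 0; row < 4; row++)
//    {
//        pos   = (row + 1) * intra_pred_ang;
//        idx   = pos >> 5;
//        fract = pos & 31;
//        for(col = 0; col < 4; col++)
//            pu1_dst[row * dst_strd + col] =
//                ((32 - fract) * ref_main[col + idx + 1]
//                 + fract * ref_main[col + idx + 2] + 16) >> 5;
//    }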
core_loop_4:
    add         x6,x8,#1                    //x6 = ref_main + 1
    mov         x8,#0                       //row = 0

    add         x5,x8,#1                    //row + 1
    mul         x5, x5, x9                  //pos = ((row + 1) * intra_pred_ang)
    asr         x14, x5, #5                 //idx = pos >> 5
    and         x5,x5,#31                   //fract = pos & (31)
    add         x10,x6,x14                  //ref_main + idx
    add         x11,x10,#1                  //ref_main + idx + 1
    dup         v0.8b,w5                    //dup_const_fract
    sub         x20,x5,#32
    neg         x4, x20                     //32 - fract
    dup         v1.8b,w4                    //dup_const_32_fract

//inner_loop_4
    ld1         {v2.s}[0],[x10]             //ref_main_idx
    add         x8,x8,#1
//    mov            x14,x5                            @fract_prev = fract

    ld1         {v3.s}[0],[x11]             //ref_main_idx_1
    add         x5,x8,#1                    //row + 1
    mul         x5, x5, x9                  //pos = ((row + 1) * intra_pred_ang)
    asr         x14, x5, #5                 //idx = pos >> 5
    and         x5,x5,#31                   //fract = pos & (31)
    add         x10,x6,x14                  //ref_main + idx
    add         x11,x10,#1                  //ref_main + idx + 1

    dup         v6.8b,w5                    //dup_const_fract
    umull       v4.8h, v2.8b, v1.8b         //vmull_u8(ref_main_idx, dup_const_32_fract)

    sub         x20,x5,#32
    neg         x4, x20                     //32 - fract
    dup         v7.8b,w4                    //dup_const_32_fract
    umlal       v4.8h, v3.8b, v0.8b         //vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v23.s}[0],[x10]            //ref_main_idx
    add         x8,x8,#1

    ld1         {v9.s}[0],[x11]             //ref_main_idx_1
    rshrn       v4.8b, v4.8h,#5             //shift_res = vrshrn_n_u16(add_res, 5)

//    mov            x14,x5                            @fract_prev = fract
    add         x5,x8,#1                    //row + 1
    mul         x5, x5, x9                  //pos = ((row + 1) * intra_pred_ang)
    asr         x14, x5, #5                 //idx = pos >> 5
    and         x5,x5,#31                   //fract = pos & (31)
    add         x10,x6,x14                  //ref_main + idx
    add         x11,x10,#1                  //ref_main + idx + 1

    dup         v12.8b,w5                   //dup_const_fract
    umull       v10.8h, v23.8b, v7.8b       //vmull_u8(ref_main_idx, dup_const_32_fract)

    sub         x20,x5,#32
    neg         x4, x20                     //32 - fract
    dup         v13.8b,w4                   //dup_const_32_fract
    umlal       v10.8h, v9.8b, v6.8b        //vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v14.s}[0],[x10]            //ref_main_idx
    add         x8,x8,#1

    st1         {v4.s}[0],[x2],x3
    rshrn       v10.8b, v10.8h,#5           //shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v15.s}[0],[x11]            //ref_main_idx_1
//    mov            x14,x5                            @fract_prev = fract
    add         x5,x8,#1                    //row + 1
    mul         x5, x5, x9                  //pos = ((row + 1) * intra_pred_ang)
    asr         x14, x5, #5                 //idx = pos >> 5
    and         x5,x5,#31                   //fract = pos & (31)
    add         x10,x6,x14                  //ref_main + idx
    add         x11,x10,#1                  //ref_main + idx + 1

    dup         v18.8b,w5                   //dup_const_fract
    umull       v16.8h, v14.8b, v13.8b      //vmull_u8(ref_main_idx, dup_const_32_fract)

    sub         x20,x5,#32
    neg         x4, x20                     //32 - fract
    dup         v19.8b,w4                   //dup_const_32_fract
    umlal       v16.8h, v15.8b, v12.8b      //vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v20.s}[0],[x10]            //ref_main_idx

    st1         {v10.s}[0],[x2],x3
    rshrn       v16.8b, v16.8h,#5           //shift_res = vrshrn_n_u16(add_res, 5)
    ld1         {v21.s}[0],[x11]            //ref_main_idx_1

    umull       v22.8h, v20.8b, v19.8b      //vmull_u8(ref_main_idx, dup_const_32_fract)
    umlal       v22.8h, v21.8b, v18.8b      //vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v16.s}[0],[x2],x3
    rshrn       v22.8b, v22.8h,#5           //shift_res = vrshrn_n_u16(add_res, 5)

    st1         {v22.s}[0],[x2],x3

end_loops:
    add         sp, sp, #132
    // ldmfd sp!,{x4-x12,x15}                  //reload the registers from sp
    ldp         x19, x20,[sp],#16
    ldp         d14,d15,[sp],#16
    ldp         d12,d13,[sp],#16
    ldp         d9,d10,[sp],#16
    ret
