///*****************************************************************************
//*
//* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at:
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
//*
//*****************************************************************************/
///**
//*******************************************************************************
//* @file
//*  ihevc_intra_pred_chroma_mode_19_to_25.s
//*
//* @brief
//*  contains function definitions for chroma intra prediction for angular
//* modes 19 to 25. functions are coded using neon intrinsics and can be
//* compiled using rvct
//*
//* @author
//*  naveen sr
//*
//* @par list of functions:
//*
//*
//* @remarks
//*  none
//*
//*******************************************************************************
//*/
///**
//*******************************************************************************
//*
//* @brief
//*    chroma intra prediction interpolation filter for angular modes 19 to 25
//*
//* @par description:
//*
//* @param[in] pu1_ref
//*  uword8 pointer to the source
//*
//* @param[out] pu1_dst
//*  uword8 pointer to the destination
//*
//* @param[in] src_strd
//*  integer source stride
//*
//* @param[in] dst_strd
//*  integer destination stride
//*
//* @param[in] nt
//*  size of transform block
//*
//* @param[in] mode
//*  intra prediction mode (19 to 25)
//*
//* @returns
//*
//* @remarks
//*  none
//*
//*******************************************************************************
//*/

//void ihevc_intra_pred_chroma_mode_19_to_25(uword8* pu1_ref,
//                               word32 src_strd,
//                               uword8* pu1_dst,
//                               word32 dst_strd,
//                               word32 nt,
//                               word32 mode)
//
//**************variables vs registers*****************************************
//x0 => *pu1_ref
//x1 => src_strd
//x2 => *pu1_dst
//x3 => dst_strd
//x4 => nt
//x5 => mode

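//**************rough c reference**********************************************
// Editorial sketch (not the library's C source) of the reference-sample setup
// this routine performs for the negative-angle chroma modes 19 to 25; names
// such as ref_main, ang, inv_ang and MAX_CU_SIZE are illustrative and follow
// the register comments below, with cb/cr samples interleaved.
//
//    word32 ang     = gai4_ihevc_ang_table[mode];          // negative for modes 19-25
//    word32 inv_ang = gai4_ihevc_inv_ang_table[mode - 12];
//    uword8 ref_temp[4 * MAX_CU_SIZE + 4];                 // 132 bytes on the stack
//    uword8 *ref_main = ref_temp + 2 * nt - 2;             // points at the top-left pair
//
//    // top reference pairs copied as-is (nt + 1 cb/cr pairs)
//    for (k = 0; k < 2 * (nt + 1); k++)
//        ref_main[k] = pu1_ref[4 * nt + k];
//
//    // when the projection reaches left of the top-left sample, left reference
//    // pairs are re-projected onto ref_main[-2], ref_main[-4], ... using the
//    // inverse angle (see the sketch before loop_copy_ref_idx below), and each
//    // output row is then a two-tap interpolation along ref_main (see the
//    // sketch before the prologue label).
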
.text
.align 4
.include "ihevc_neon_macros.s"


.globl ihevc_intra_pred_chroma_mode_19_to_25_av8
.extern gai4_ihevc_ang_table
.extern gai4_ihevc_inv_ang_table
.extern gau1_ihevc_planar_factor

.type ihevc_intra_pred_chroma_mode_19_to_25_av8, %function

ihevc_intra_pred_chroma_mode_19_to_25_av8:

    // stmfd sp!, {x4-x12, x14}             //stack stores the values of the arguments

    stp         d12,d13,[sp,#-16]!
    stp         d8,d14,[sp,#-16]!           // Storing d14 using { sub sp,sp,#8; str d14,[sp] } is giving bus error.
                                            // d8 is used as dummy register and stored along with d14 using stp. d8 is not used in the function.
    stp         x19, x20,[sp,#-16]!

    adrp        x7,  :got:gai4_ihevc_ang_table
    ldr         x7,  [x7, #:got_lo12:gai4_ihevc_ang_table]

    adrp        x8,  :got:gai4_ihevc_inv_ang_table
    ldr         x8,  [x8, #:got_lo12:gai4_ihevc_inv_ang_table]

    add         x7, x7, x5, lsl #2          //gai4_ihevc_ang_table[mode]
    add         x8, x8, x5, lsl #2          //gai4_ihevc_inv_ang_table
    sub         x8, x8, #48                 //gai4_ihevc_inv_ang_table[mode - 12]

    ldr         w7,  [x7]                   //intra_pred_ang
    sxtw        x7,w7
    sub         sp, sp, #132                //ref_temp[2 * max_cu_size + 2]

    ldr         w8,  [x8]                   //inv_ang
    sxtw        x8,w8
    add         x6, sp, x4 , lsl #1         //ref_temp + 2 * nt

    mul         x9, x4, x7                  //nt*intra_pred_ang

    sub         x6, x6, #2                  //ref_temp + 2*nt - 2

    add         x1, x0, x4, lsl #2          //x1 = &src[4nt]
    dup         v30.8b,w7                   //intra_pred_ang

    mov         x7, x4

    asr         x9, x9, #5

    ld1         {v0.2s},[x1],#8             // pu1_ref[two_nt + k]

    st1         {v0.2s},[x6],#8             //ref_temp[k + nt - 1] = pu1_ref[two_nt + k]//

    subs        x7, x7, #4
    beq         end_loop_copy
    subs        x7,x7,#4
    beq         loop_copy_8
    subs        x7,x7,#8
    beq         loop_copy_16

loop_copy_32:
    ld1         {v0.8b, v1.8b, v2.8b, v3.8b},[x1],#32
    ld1         {v4.8b, v5.8b, v6.8b},[x1],#24

    st1         {v0.8b, v1.8b, v2.8b, v3.8b},[x6],#32


    st1         {v4.8b, v5.8b, v6.8b},[x6],#24
    b           end_loop_copy

loop_copy_16:
    ld1         {v0.8b, v1.8b, v2.8b},[x1],#24
    st1         {v0.8b, v1.8b, v2.8b},[x6],#24

    b           end_loop_copy

loop_copy_8:
    ld1         {v0.8b},[x1],#8
    st1         {v0.8b},[x6],#8

end_loop_copy:

    ldrh        w11, [x1]
    sxtw        x11,w11
    strh        w11, [x6]
    sxtw        x11,w11

    cmp         x9, #-1
    bge         linear_filtering

    add         x6, sp, x4 ,lsl #1          //ref_temp + 2 * nt
    sub         x6, x6, #4                  //ref_temp + 2 * nt - 2 - 2

    mov         x12, #-1

    sub         x20, x9, x12                //count to take care of ref_idx
    neg         x9, x20

    add         x1, x0, x4, lsl #2          //x1 = &src[4nt]

    mov         x7, #128                    //inv_ang_sum

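// The loop below is, roughly, the following scalar computation (editorial
// sketch, not the library's C source; ref_main and ref_idx mirror the register
// comments, with x9 holding -(ref_idx + 1) as a count, ref_idx =
// (nt * intra_pred_ang) >> 5, and x7 holding inv_ang_sum):
//
//    inv_ang_sum = 128;                                   // 8.8 fixed-point rounding offset
//    for (k = -1; k > ref_idx; k--) {
//        inv_ang_sum += inv_ang;
//        // one cb/cr pair per iteration, stored backwards from ref_main[-2]
//        ref_main[2 * k]     = pu1_ref[4 * nt - 2 * (inv_ang_sum >> 8)];
//        ref_main[2 * k + 1] = pu1_ref[4 * nt - 2 * (inv_ang_sum >> 8) + 1];
//    }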
loop_copy_ref_idx:

    add         x7, x7, x8                  //inv_ang_sum += inv_ang
    lsr         x0, x7, #8
    lsl         x0, x0, #1
    neg         x20,x0
    ldrh        w11, [x1, x20]
    sxtw        x11,w11
    strh        w11, [x6], #-2
    sxtw        x11,w11

    subs        x9, x9, #1

    bne         loop_copy_ref_idx


linear_filtering:
//    after copy
//    below code is taken from mode 27 to 33 and modified


    adrp        x6,  :got:gai4_ihevc_ang_table //loads word32 gai4_ihevc_ang_table[35]
    ldr         x6,  [x6, #:got_lo12:gai4_ihevc_ang_table]

    lsl         x7,x4,#2                    //four_nt

    add         x8,x6,x5,lsl #2             //*gai4_ihevc_ang_table[mode]
    ldr         w9, [x8]                    //intra_pred_ang = gai4_ihevc_ang_table[mode]
    sxtw        x9,w9
    adrp        x1, :got:gau1_ihevc_planar_factor //used for ((row + 1) * intra_pred_ang) row values
    ldr         x1, [x1, #:got_lo12:gau1_ihevc_planar_factor]

    add         x6,x1,#1

    add         x8, sp, x4, lsl #1          //ref_temp + 2 * nt
    sub         x8, x8,#2                   //ref_temp + 2*nt -2

    mov         x14,#0                      //row
    mov         x12,x4
    lsl         x4,x4,#1

core_loop_8:
    add         x8,x8,#2                    //pu1_ref_main_idx += (four_nt + 1)
    dup         v0.8b,w9                    //intra_pred_ang
    lsr         x12, x4, #4                 //divide by 8

    movi        v1.8b, #32
    mul         x7, x4, x12

    movi        v6.8h, #31


    mov         x1,x8

    mov         x5,x4
    mov         x11,#2

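// Per row, the prologue/kernel below evaluates the following (editorial sketch,
// not the library's C source; ref_main here corresponds to x8, i.e. the
// reference pointer after the +2 adjustment at core_loop_8, and dst to pu1_dst):
//
//    pos   = (row + 1) * intra_pred_ang;                  // smull v2, row vector, v0
//    idx   = pos >> 5;                                    // shrn #5, then shl #1 for cb/cr pairs
//    fract = pos & 31;                                    // and with v6 = 31
//    for (col = 0; col < 2 * nt; col++)                   // 8 interleaved bytes per NEON step
//        dst[row * dst_strd + col] =
//            ((32 - fract) * ref_main[col + 2 * idx]      // umull by (32 - fract)
//             +      fract * ref_main[col + 2 * idx + 2]  // umlal by fract
//             + 16) >> 5;                                  // rshrn #5 rounds and narrows
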
prologue:
    ld1         {v3.8b},[x6]                //loads the row value
    smull       v2.8h, v3.8b, v0.8b         //pos = ((row + 1) * intra_pred_ang)
    and         v4.16b,  v2.16b ,  v6.16b   //dup_const_fract(fract = pos & (31))
    xtn         v4.8b,  v4.8h
    shrn        v5.8b, v2.8h,#5             //idx = pos >> 5
    shl         v5.8b, v5.8b,#1

    dup         v31.8b, v4.8b[0]
    add         x0,x2,x3

    smov        x14, v5.2s[0]               //(i row)extract idx to the r register
//    lsl            x14,x14,#1

    dup         v29.8b, v4.8b[1]            //(ii)
    sbfx        x9,x14,#0,#8

    add         x10,x8,x9                   //(i row)*pu1_ref[ref_main_idx]

    ld1         {v7.8b},[x10],x11           //(i row)ref_main_idx
    sbfx        x9,x14,#8,#8

    ld1         {v19.8b},[x10]              //(i row)ref_main_idx_1
    add         x12,x8,x9                   //(ii)*pu1_ref[ref_main_idx]

    sbfx        x9,x14,#16,#8
    sub         v30.8b,  v1.8b ,  v31.8b    //32-fract(dup_const_32_fract)
    add         x10,x8,x9                   //(iii)*pu1_ref[ref_main_idx]

    ld1         {v12.8b},[x12],x11          //(ii)ref_main_idx
    umull       v23.8h, v7.8b, v30.8b       //(i row)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(ii)ref_main_idx_1
    umlal       v23.8h, v19.8b, v31.8b      //(i row)vmull_u8(ref_main_idx_1, dup_const_fract)

    dup         v27.8b, v4.8b[2]            //(iii)
    sub         v28.8b,  v1.8b ,  v29.8b    //(ii)32-fract(dup_const_32_fract)
    sbfx        x9,x14,#24,#8

    dup         v25.8b, v4.8b[3]            //(iv)
    umull       v14.8h, v12.8b, v28.8b      //(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
    add         x12,x8,x9                   //(iv)*pu1_ref[ref_main_idx]

    ld1         {v16.8b},[x10],x11          //(iii)ref_main_idx
    umlal       v14.8h, v13.8b, v29.8b      //(ii)vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v17.8b},[x10]              //(iii)ref_main_idx_1
    rshrn       v23.8b, v23.8h,#5           //(i row)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v20.8b},[x12],x11          //(iv)ref_main_idx
    sub         v26.8b,  v1.8b ,  v27.8b    //(iii)32-fract(dup_const_32_fract)

    ld1         {v21.8b},[x12]              //(iv)ref_main_idx_1

    dup         v31.8b, v4.8b[4]            //(v)
    umull       v18.8h, v16.8b, v26.8b      //(iii)vmull_u8(ref_main_idx, dup_const_32_fract)

    smov        x14, v5.2s[1]               //extract idx to the r register
    umlal       v18.8h, v17.8b, v27.8b      //(iii)vmull_u8(ref_main_idx_1, dup_const_fract)
//    lsl            x14,x14,#1

    st1         {v23.8b},[x2],#8            //(i row)
    rshrn       v14.8b, v14.8h,#5           //(ii)shift_res = vrshrn_n_u16(add_res, 5)

    sbfx        x9,x14,#0,#8
    dup         v29.8b, v4.8b[5]            //(vi)
    add         x10,x8,x9                   //(v)*pu1_ref[ref_main_idx]

    ld1         {v7.8b},[x10],x11           //(v)ref_main_idx
    sub         v24.8b,  v1.8b ,  v25.8b    //(iv)32-fract(dup_const_32_fract)

    umull       v22.8h, v20.8b, v24.8b      //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
    sbfx        x9,x14,#8,#8

    ld1         {v19.8b},[x10]              //(v)ref_main_idx_1
    umlal       v22.8h, v21.8b, v25.8b      //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v14.8b},[x0],x3            //(ii)
    rshrn       v18.8b, v18.8h,#5           //(iii)shift_res = vrshrn_n_u16(add_res, 5)

    add         x12,x8,x9                   //(vi)*pu1_ref[ref_main_idx]
    dup         v27.8b, v4.8b[6]            //(vii)

    sbfx        x9,x14,#16,#8
    sub         v30.8b,  v1.8b ,  v31.8b    //(v)32-fract(dup_const_32_fract)
    add         x10,x8,x9                   //(vii)*pu1_ref[ref_main_idx]

    ld1         {v12.8b},[x12],x11          //(vi)ref_main_idx
    umull       v23.8h, v7.8b, v30.8b       //(v)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(vi)ref_main_idx_1
    umlal       v23.8h, v19.8b, v31.8b      //(v)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v18.8b},[x0],x3            //(iii)
    rshrn       v22.8b, v22.8h,#5           //(iv)shift_res = vrshrn_n_u16(add_res, 5)

    dup         v25.8b, v4.8b[7]            //(viii)
    sbfx        x9,x14,#24,#8

    ld1         {v16.8b},[x10],x11          //(vii)ref_main_idx
    sub         v28.8b,  v1.8b ,  v29.8b    //(vi)32-fract(dup_const_32_fract)

    ld1         {v17.8b},[x10]              //(vii)ref_main_idx_1
    umull       v14.8h, v12.8b, v28.8b      //(vi)vmull_u8(ref_main_idx, dup_const_32_fract)

    add         x12,x8,x9                   //(viii)*pu1_ref[ref_main_idx]
    umlal       v14.8h, v13.8b, v29.8b      //(vi)vmull_u8(ref_main_idx_1, dup_const_fract)
    subs        x7,x7,#8

    st1         {v22.8b},[x0],x3            //(iv)
    cmp         x4,#8                       // go to end if 4x4
    beq         end_loops

    rshrn       v23.8b, v23.8h,#5           //(v)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v20.8b},[x12],x11          //(viii)ref_main_idx
    sub         v26.8b,  v1.8b ,  v27.8b    //(vii)32-fract(dup_const_32_fract)

    ld1         {v21.8b},[x12]              //(viii)ref_main_idx_1
    umull       v18.8h, v16.8b, v26.8b      //(vii)vmull_u8(ref_main_idx, dup_const_32_fract)

    add         x20,x8,#8
    csel        x8, x20, x8,gt
    umlal       v18.8h, v17.8b, v27.8b      //(vii)vmull_u8(ref_main_idx_1, dup_const_fract)
    sub         x20,x4,#8
    csel        x4, x20, x4,gt

    st1         {v23.8b},[x0],x3            //(v)
    rshrn       v14.8b, v14.8h,#5           //(vi)shift_res = vrshrn_n_u16(add_res, 5)

    beq         epilogue

    ld1         {v5.8b},[x6]                //loads the row value
    smull       v2.8h, v5.8b, v0.8b         //pos = ((row + 1) * intra_pred_ang)
    and         v4.16b,  v2.16b ,  v6.16b   //dup_const_fract(fract = pos & (31))
    xtn         v4.8b,  v4.8h
    shrn        v3.8b, v2.8h,#5             //idx = pos >> 5
    shl         v3.8b, v3.8b,#1
    smov        x14, v3.2s[0]               //(i)extract idx to the r register
//    lsl            x14,x14,#1
    sbfx        x9,x14,#0,#8
    add         x10,x8,x9                   //(i)*pu1_ref[ref_main_idx]

kernel_8_rows:
    dup         v31.8b, v4.8b[0]
    subs        x4,x4,#8
    sbfx        x9,x14,#8,#8

    ld1         {v7.8b},[x10],x11           //(i)ref_main_idx
    sub         v24.8b,  v1.8b ,  v25.8b    //(viii)32-fract(dup_const_32_fract)

    add         x20,x6,#8                   //increment the row value
    csel        x6, x20, x6,le
    add         x12,x8,x9                   //(ii)*pu1_ref[ref_main_idx]

    ld1         {v19.8b},[x10]              //(i)ref_main_idx_1
    umull       v22.8h, v20.8b, v24.8b      //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v5.8b},[x6]                //loads the row value
    umlal       v22.8h, v21.8b, v25.8b      //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)

    dup         v29.8b, v4.8b[1]            //(ii)
    rshrn       v18.8b, v18.8h,#5           //(vii)shift_res = vrshrn_n_u16(add_res, 5)

    sbfx        x9,x14,#16,#8

    st1         {v14.8b},[x0],x3            //(vi)
    sub         v30.8b,  v1.8b ,  v31.8b    //(i)32-fract(dup_const_32_fract)

    add         x10,x8,x9                   //(iii)*pu1_ref[ref_main_idx]

    ld1         {v12.8b},[x12],x11          //(ii)ref_main_idx
    umull       v23.8h, v7.8b, v30.8b       //(i)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v13.8b},[x12]              //(ii)ref_main_idx_1
    umlal       v23.8h, v19.8b, v31.8b      //(i)vmull_u8(ref_main_idx_1, dup_const_fract)

    sbfx        x9,x14,#24,#8
    csel        x4, x5, x4,le               //reload nt

    smov        x14, v3.2s[1]               //extract idx to the r register
    rshrn       v22.8b, v22.8h,#5           //(viii)shift_res = vrshrn_n_u16(add_res, 5)

    dup         v27.8b, v4.8b[2]            //(iii)
    sub         v28.8b,  v1.8b ,  v29.8b    //(ii)32-fract(dup_const_32_fract)
    add         x12,x8,x9                   //(iv)*pu1_ref[ref_main_idx]

    ld1         {v16.8b},[x10],x11          //(iii)ref_main_idx
    umull       v14.8h, v12.8b, v28.8b      //(ii)vmull_u8(ref_main_idx, dup_const_32_fract)

    st1         {v18.8b},[x0],x3            //(vii)
    umlal       v14.8h, v13.8b, v29.8b      //(ii)vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v17.8b},[x10]              //(iii)ref_main_idx_1
    rshrn       v23.8b, v23.8h,#5           //(i)shift_res = vrshrn_n_u16(add_res, 5)

    dup         v25.8b, v4.8b[3]            //(iv)
    smull       v2.8h, v5.8b, v0.8b         //pos = ((row + 1) * intra_pred_ang)

    st1         {v22.8b},[x0]               //(viii)
    sub         v26.8b,  v1.8b ,  v27.8b    //(iii)32-fract(dup_const_32_fract)

    ld1         {v20.8b},[x12],x11          //(iv)ref_main_idx
    umull       v18.8h, v16.8b, v26.8b      //(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
//    lsl            x14,x14,#1

    ld1         {v21.8b},[x12]              //(iv)ref_main_idx_1
    umlal       v18.8h, v17.8b, v27.8b      //(iii)vmull_u8(ref_main_idx_1, dup_const_fract)

    sbfx        x9,x14,#0,#8
    add         x0,x2,x3

    dup         v31.8b, v4.8b[4]            //(v)
    rshrn       v14.8b, v14.8h,#5           //(ii)shift_res = vrshrn_n_u16(add_res, 5)

    add         x10,x8,x9                   //(v)*pu1_ref[ref_main_idx]
    sbfx        x9,x14,#8,#8

    st1         {v23.8b},[x2],#8            //(i)
    sub         v24.8b,  v1.8b ,  v25.8b    //(iv)32-fract(dup_const_32_fract)

    dup         v29.8b, v4.8b[5]            //(vi)
    umull       v22.8h, v20.8b, v24.8b      //(iv)vmull_u8(ref_main_idx, dup_const_32_fract)

    dup         v27.8b, v4.8b[6]            //(vii)
    umlal       v22.8h, v21.8b, v25.8b      //(iv)vmull_u8(ref_main_idx_1, dup_const_fract)

    add         x12,x8,x9                   //(vi)*pu1_ref[ref_main_idx]
    sbfx        x9,x14,#16,#8

    dup         v25.8b, v4.8b[7]            //(viii)
    rshrn       v18.8b, v18.8h,#5           //(iii)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v7.8b},[x10],x11           //(v)ref_main_idx
    and         v4.16b,  v2.16b ,  v6.16b   //dup_const_fract(fract = pos & (31))

    ld1         {v19.8b},[x10]              //(v)ref_main_idx_1
    shrn        v3.8b, v2.8h,#5             //idx = pos >> 5

    st1         {v14.8b},[x0],x3            //(ii)
    rshrn       v22.8b, v22.8h,#5           //(iv)shift_res = vrshrn_n_u16(add_res, 5)

    add         x10,x8,x9                   //(vii)*pu1_ref[ref_main_idx]
    sbfx        x9,x14,#24,#8

    ld1         {v12.8b},[x12],x11          //(vi)ref_main_idx
    sub         v30.8b,  v1.8b ,  v31.8b    //(v)32-fract(dup_const_32_fract)

    shl         v3.8b, v3.8b,#1

    ld1         {v13.8b},[x12]              //(vi)ref_main_idx_1
    umull       v23.8h, v7.8b, v30.8b       //(v)vmull_u8(ref_main_idx, dup_const_32_fract)

    smov        x14, v3.2s[0]               //(i)extract idx to the r register
    umlal       v23.8h, v19.8b, v31.8b      //(v)vmull_u8(ref_main_idx_1, dup_const_fract)

    add         x12,x8,x9                   //(viii)*pu1_ref[ref_main_idx]
    csel        x8, x1, x8,le               //reload the source to pu1_src+2nt

    ld1         {v16.8b},[x10],x11          //(vii)ref_main_idx
    sub         v28.8b,  v1.8b ,  v29.8b    //(vi)32-fract(dup_const_32_fract)

    st1         {v18.8b},[x0],x3            //(iii)
    umull       v14.8h, v12.8b, v28.8b      //(vi)vmull_u8(ref_main_idx, dup_const_32_fract)

    ld1         {v17.8b},[x10]              //(vii)ref_main_idx_1
    umlal       v14.8h, v13.8b, v29.8b      //(vi)vmull_u8(ref_main_idx_1, dup_const_fract)

    ld1         {v20.8b},[x12],x11          //(viii)ref_main_idx
    rshrn       v23.8b, v23.8h,#5           //(v)shift_res = vrshrn_n_u16(add_res, 5)

    ld1         {v21.8b},[x12]              //(viii)ref_main_idx_1
    sub         v26.8b,  v1.8b ,  v27.8b    //(vii)32-fract(dup_const_32_fract)

    add         x20,x8,#8                   //increment the source next set 8 columns in same row
    csel        x8, x20, x8,gt
    lsl         x20, x3,#3
    csel        x12,x20,x12,le
    sub         x20,x12,x5
    csel        x12, x20, x12,le

    st1         {v22.8b},[x0],x3            //(iv)
    umull       v18.8h, v16.8b, v26.8b      //(vii)vmull_u8(ref_main_idx, dup_const_32_fract)

    st1         {v23.8b},[x0],x3            //(v)
    umlal       v18.8h, v17.8b, v27.8b      //(vii)vmull_u8(ref_main_idx_1, dup_const_fract)

    add         x20,x2,x12                  //increment the dst pointer to 8*dst_strd - nt
    csel        x2, x20, x2,le
    sbfx        x9,x14,#0,#8

    xtn         v4.8b,  v4.8h
    rshrn       v14.8b, v14.8h,#5           //(vi)shift_res = vrshrn_n_u16(add_res, 5)
//    lsl            x14,x14,#1

    subs        x7,x7,#8
    add         x10,x8,x9                   //(i)*pu1_ref[ref_main_idx]

    bne         kernel_8_rows

epilogue:
    st1         {v14.8b},[x0],x3            //(vi)
    rshrn       v18.8b, v18.8h,#5           //(vii)shift_res = vrshrn_n_u16(add_res, 5)

    sub         v24.8b,  v1.8b ,  v25.8b    //(viii)32-fract(dup_const_32_fract)
    umull       v22.8h, v20.8b, v24.8b      //(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
    umlal       v22.8h, v21.8b, v25.8b      //(viii)vmull_u8(ref_main_idx_1, dup_const_fract)

    st1         {v18.8b},[x0],x3            //(vii)
    rshrn       v22.8b, v22.8h,#5           //(viii)shift_res = vrshrn_n_u16(add_res, 5)

    st1         {v22.8b},[x0],x3            //(viii)
    b           end_loops

core_loop_4:

end_loops:
    add         sp, sp, #132
    // ldmfd sp!,{x4-x12,x15}               //reload the registers from sp
    ldp         x19, x20,[sp],#16
    ldp         d8,d14,[sp],#16             // Loading d14 using { ldr d14,[sp]; add sp,sp,#8 } is giving bus error.
                                            // d8 is used as dummy register and loaded along with d14 using ldp. d8 is not used in the function.
    ldp         d12,d13,[sp],#16
    ret
    581