@/*****************************************************************************
@*
@* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore
@*
@* Licensed under the Apache License, Version 2.0 (the "License");
@* you may not use this file except in compliance with the License.
@* You may obtain a copy of the License at:
@*
@* http://www.apache.org/licenses/LICENSE-2.0
@*
@* Unless required by applicable law or agreed to in writing, software
@* distributed under the License is distributed on an "AS IS" BASIS,
@* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@* See the License for the specific language governing permissions and
@* limitations under the License.
@*
@*****************************************************************************/
@/**
@*******************************************************************************
@* @file
@*  ihevc_sao_edge_offset_class0_chroma.s
@*
@* @brief
@*  Contains function definitions for the SAO edge offset class 0
@*  (horizontal) filter for interleaved chroma. Functions are coded in NEON
@*  assembly and can be assembled using ARM RVCT.
@*
@* @author
@*  Parthiban V
@*
@* @par List of Functions:
@*  - ihevc_sao_edge_offset_class0_chroma_a9q()
@*
@* @remarks
@*  None
@*
@*******************************************************************************
@*/
@void ihevc_sao_edge_offset_class0_chroma(UWORD8 *pu1_src,
@                              WORD32 src_strd,
@                              UWORD8 *pu1_src_left,
@                              UWORD8 *pu1_src_top,
@                              UWORD8 *pu1_src_top_left,
@                              UWORD8 *pu1_src_top_right,
@                              UWORD8 *pu1_src_bot_left,
@                              UWORD8 *pu1_avail,
@                              WORD8 *pi1_sao_offset_u,
@                              WORD8 *pi1_sao_offset_v,
@                              WORD32 wd,
@                              WORD32 ht)
@**************Variables Vs Registers*****************************************
@r0 =>  *pu1_src
@r1 =>  src_strd
@r2 =>  *pu1_src_left
@r3 =>  *pu1_src_top
@r4 =>  *pu1_src_top_left
@r7 =>  *pu1_avail
@r8 =>  *pi1_sao_offset_u
@r5 =>  *pi1_sao_offset_v
@r9 =>  wd
@r10=>  ht

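@/**
@* Editor's note: a minimal C sketch of what this routine computes, pieced
@* together from the intrinsic-style comments below; it is an illustration,
@* not Ittiam's reference code. U and V samples are interleaved, so the
@* horizontal neighbours of a sample sit two bytes away. SIGN and CLIP3 are
@* the usual helper macros and are assumed here; boundary handling via
@* pu1_src_left/pu1_avail is omitted.
@*
@*  for(row = 0; row < ht; row++, pu1_src += src_strd)
@*      for(col = 0; col < wd; col++)
@*      {
@*          WORD32 sign_left  = SIGN(pu1_src[col] - pu1_src[col - 2]);
@*          WORD32 sign_right = SIGN(pu1_src[col] - pu1_src[col + 2]);
@*          WORD32 edge_idx   = gi1_table_edge_idx[2 + sign_left + sign_right];
@*          WORD8 *pi1_offset = (col & 1) ? pi1_sao_offset_v : pi1_sao_offset_u;
@*          pu1_src[col] = CLIP3(pu1_src[col] + pi1_offset[edge_idx], 0, 255);
@*      }
@*/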
.text
.p2align 2

.extern gi1_table_edge_idx
.globl ihevc_sao_edge_offset_class0_chroma_a9q

gi1_table_edge_idx_addr:
.long gi1_table_edge_idx - ulbl1 - 8
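@The literal above stores the offset of gi1_table_edge_idx from (ulbl1 + 8);
@since PC reads as the current instruction address plus 8 in ARM state, the
@"add r14,r14,pc" at ulbl1 below turns it into the table's absolute address.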

ihevc_sao_edge_offset_class0_chroma_a9q:


    STMFD       sp!, {r4-r12, r14}          @stack stores the values of the arguments
    LDR         r9,[sp,#64]                 @Loads wd

    LDR         r4,[sp,#40]                 @Loads pu1_src_top_left
    ADD         r11,r3,r9                   @pu1_src_top[wd]

    LDR         r10,[sp,#68]                @Loads ht
    VMOV.I8     Q1,#2                       @const_2 = vdupq_n_s8(2)
    LDRH        r12,[r11,#-2]               @pu1_src_top[wd - 1]

    LDR         r7,[sp,#52]                 @Loads pu1_avail
    VMOV.I16    Q2,#0                       @const_min_clip = vdupq_n_s16(0)
    STRH        r12,[r4]                    @*pu1_src_top_left = pu1_src_top[wd - 1]

    LDR         r8,[sp,#56]                 @Loads pi1_sao_offset_u
    VMOV.I16    Q3,#255                     @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    SUB         r4,r10,#1                   @(ht - 1)

    LDR         r14, gi1_table_edge_idx_addr @table pointer
ulbl1:
    add         r14,r14,pc
    VMOV.S8     Q4,#0xFF                    @au1_mask = vdupq_n_s8(-1)
    MUL         r4,r4,r1                    @(ht - 1) * src_strd

    LDR         r5,[sp,#60]                 @Loads pi1_sao_offset_v
    VLD1.8      D11,[r8]                    @offset_tbl = vld1_s8(pi1_sao_offset_u)
    ADD         r4,r4,r0                    @pu1_src[(ht - 1) * src_strd]

    MOV         r6,r0                       @pu1_src_org
    VLD1.8      D10,[r14]                   @edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
    MOV         r12,r9                      @Move wd to r12 for loop count

SRC_TOP_LOOP:                               @wd is always multiple of 8
    VLD1.8      D0,[r4]!                    @Load pu1_src[(ht - 1) * src_strd + col]
    SUBS        r12,r12,#8                  @Decrement the loop counter by 8
    VST1.8      D0,[r3]!                    @Store to pu1_src_top[col]
    BNE         SRC_TOP_LOOP
    ADD         r6,r6,#14                   @pu1_src_org[14]

    MOV         r3,r2                       @pu1_src_left backup to reload later
    VLD1.8      D0,[r5]                     @offset_tbl = vld1_s8(pi1_sao_offset_v)
    CMP         r9,#16                      @Compare wd with 16

    BLT         WIDTH_RESIDUE               @If not jump to WIDTH_RESIDUE where loop is unrolled for 8 case

    MOV         r8,r9                       @move wd to r8 for loop count

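@The image is processed in 16-byte columns (8 interleaved U/V pairs); each
@pass of PU1_SRC_LOOP handles two rows, with the "II" comments marking the
@software-pipelined second row. au1_mask zeroes the edge_idx of the first
@pair when the left neighbour is unavailable (pu1_avail[0]) and of the last
@pair when the right neighbour is unavailable (pu1_avail[1]).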
WIDTH_LOOP_16:
    CMP         r8,r9                       @if(col == wd)
    BNE         AU1_MASK_FF                 @jump to else part
    LDRB        r12,[r7]                    @pu1_avail[0]
    VMOV.8      D8[0],r12                   @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
    VMOV.8      D8[1],r12                   @vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
    B           SKIP_AU1_MASK_FF            @Skip the else part

AU1_MASK_FF:
    MOV         r12,#-1                     @move -1 to r12
    VMOV.16     D8[0],r12                   @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

SKIP_AU1_MASK_FF:
    CMP         r8,#16                      @If col == 16
    BNE         SKIP_MASKING_IF_NOT16       @If not skip masking
    LDRB        r12,[r7,#1]                 @pu1_avail[1]
    VMOV.8      D9[6],r12                   @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
    VMOV.8      D9[7],r12                   @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)

SKIP_MASKING_IF_NOT16:
    MOV         r12,r0                      @pu1_src_cpy = pu1_src
    MOV         r4,r10                      @move ht to r4 for loop count

PU1_SRC_LOOP:
    LDRH        r11,[r2]                    @load pu1_src_left; ht - row = 0 on the first pass, pu1_src_left is incremented later
    VLD1.8      D12,[r12]!                  @pu1_cur_row = vld1q_u8(pu1_src_cpy)
    VLD1.8      D13,[r12],r1                @pu1_cur_row = vld1q_u8(pu1_src_cpy)
    SUB         r12,#8
    SUB         r5,r9,r8                    @wd - col

    SUB         r14,r10,r4                  @ht - row
    VMOV.16     D15[3],r11                  @vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
    MUL         r14,r14,r1                  @(ht - row) * src_strd

    VLD1.8      D30,[r12]!                  @II Iteration pu1_cur_row = vld1q_u8(pu1_src_cpy)
    VLD1.8      D31,[r12]                   @II Iteration pu1_cur_row = vld1q_u8(pu1_src_cpy)
    SUB         r12,#8
    VEXT.8      Q7,Q7,Q6,#14                @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
    SUB         r12,r12,r1

    LDRH        r11,[r2,#2]                 @II load pu1_src_left
    VCGT.U8     Q8,Q6,Q7                    @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    ADD         r5,r14,r5                   @(ht - row) * src_strd + (wd - col)

    VMOV.16     D29[3],r11                  @II vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
    VCLT.U8     Q9,Q6,Q7                    @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)

    LDRH        r14,[r6,r5]                 @pu1_src_org[(ht - row) * src_strd + 14 + (wd - col)]
    VSUB.U8     Q10,Q9,Q8                   @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    SUB         r4,r4,#1

    LDRB        r11,[r12,#16]               @pu1_src_cpy[16]
    VEXT.8      Q14,Q14,Q15,#14             @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)

    VMOV.8      D14[0],r11                  @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
    VCGT.U8     Q13,Q15,Q14                 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)

    LDRB        r11,[r12,#17]               @pu1_src_cpy[17]
    VCLT.U8     Q12,Q15,Q14                 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    STRH        r14,[r2],#2                 @pu1_src_left[(ht - row)] = au1_src_left_tmp[(ht - row)]

    ADD         r12,r12,r1
    VMOV.8      D14[1],r11                  @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
    LDRB        r11,[r12,#16]               @II pu1_src_cpy[16]

    VEXT.8      Q7,Q6,Q7,#2                 @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
    VMOV.8      D28[0],r11                  @II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)

    LDRB        r11,[r12,#17]               @II pu1_src_cpy[17]
    VCGT.U8     Q8,Q6,Q7                    @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    SUB         r12,r12,r1

    VCLT.U8     Q9,Q6,Q7                    @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    VMOV.8      D28[1],r11                  @II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)

    VSUB.U8     Q11,Q9,Q8                   @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    VEXT.8      Q14,Q15,Q14,#2              @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)

    VADD.U8     Q7,Q1,Q10                   @edge_idx = vaddq_s8(const_2, sign_left)

    VADD.U8     Q7,Q7,Q11                   @edge_idx = vaddq_s8(edge_idx, sign_right)
    VTBL.8      D14,{D10},D14               @vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    VSUB.U8     Q10,Q12,Q13                 @II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    VCGT.U8     Q13,Q15,Q14                 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    VTBL.8      D15,{D10},D15               @vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    VCLT.U8     Q12,Q15,Q14                 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)

    VAND        Q7,Q7,Q4                    @edge_idx = vandq_s8(edge_idx, au1_mask)
    VUZP.8      D14,D15                     @deinterleave edge_idx into U (D14) and V (D15) lanes

    VSUB.U8     Q11,Q12,Q13                 @II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    VTBL.8      D16,{D11},D14               @offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
    VADD.U8     Q12,Q1,Q10                  @II edge_idx = vaddq_s8(const_2, sign_left)

    VMOVL.U8    Q9,D12                      @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    VTBL.8      D17,{D0},D15                @offset = vtbl1_s8(offset_tbl_v, vget_high_s8(edge_idx))
    VADD.U8     Q12,Q12,Q11                 @II edge_idx = vaddq_s8(edge_idx, sign_right)

    VZIP.S8     D16,D17                     @re-interleave the U and V offsets
    VTBL.8      D24,{D10},D24               @II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    VMOVL.U8    Q6,D13                      @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))

    VADDW.S8    Q9,Q9,D16                   @pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    VTBL.8      D25,{D10},D25               @II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    VMAX.S16    Q9,Q9,Q2                    @pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    VAND        Q12,Q12,Q4                  @II edge_idx = vandq_s8(edge_idx, au1_mask)
    VMIN.U16    Q9,Q9,Q3                    @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
    VUZP.8      D24,D25                     @II deinterleave edge_idx into U and V lanes

    VADDW.S8    Q6,Q6,D17                   @pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    VTBL.8      D26,{D11},D24               @II offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
    VMAX.S16    Q6,Q6,Q2                    @pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)

    VMIN.U16    Q6,Q6,Q3                    @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
    VTBL.8      D27,{D0},D25                @II offset = vtbl1_s8(offset_tbl_v, vget_high_s8(edge_idx))
    VMOVN.I16   D14,Q9                      @vmovn_s16(pi2_tmp_cur_row.val[0])

    VMOVN.I16   D15,Q6                      @vmovn_s16(pi2_tmp_cur_row.val[1])
    VZIP.S8     D26,D27                     @II re-interleave the U and V offsets

    SUB         r5,r9,r8                    @II wd - col
    VMOVL.U8    Q14,D30                     @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    SUB         r14,r10,r4                  @II ht - row

    MUL         r14,r14,r1                  @II (ht - row) * src_strd
    VADDW.S8    Q14,Q14,D26                 @II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    ADD         r5,r14,r5                   @II (ht - row) * src_strd + (wd - col)

    LDRH        r14,[r6,r5]                 @II pu1_src_org[(ht - row) * src_strd + 14 + (wd - col)]
    VMAX.S16    Q14,Q14,Q2                  @II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)

    STRH        r14,[r2],#2                 @II pu1_src_left[(ht - row)] = au1_src_left_tmp[(ht - row)]
    VMIN.U16    Q14,Q14,Q3                  @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    VMOVL.U8    Q15,D31                     @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))

    VADDW.S8    Q15,Q15,D27                 @II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
    VST1.8      {D14,D15},[r12],r1          @vst1q_u8(pu1_src_cpy, pu1_cur_row)

    VMAX.S16    Q15,Q15,Q2                  @II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
    SUBS        r4,r4,#1                    @Decrement row by 1
    VMIN.U16    Q15,Q15,Q3                  @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))

    VMOVN.I16   D28,Q14                     @II vmovn_s16(pi2_tmp_cur_row.val[0])
    VMOVN.I16   D29,Q15                     @II vmovn_s16(pi2_tmp_cur_row.val[1])

    VST1.8      {D28,D29},[r12],r1          @II vst1q_u8(pu1_src_cpy, pu1_cur_row)

    BNE         PU1_SRC_LOOP                @If not equal jump to the inner loop

    ADD         r0,r0,#16                   @pu1_src += 16

    SUBS        r8,r8,#16                   @Decrement column by 16
    CMP         r8,#8                       @Check whether residue remains
    MOV         r2,r3                       @Reload pu1_src_left
    BEQ         WIDTH_RESIDUE               @If residue remains jump to residue loop
    BGT         WIDTH_LOOP_16               @If not equal jump to width_loop
    BLT         END_LOOPS                   @Jump to end function

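@Residue path: the remaining 8-byte column (4 interleaved U/V pairs) runs the
@same two-row pipeline as above, but only the low 8 result bytes (D18/D28)
@are stored per row.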
WIDTH_RESIDUE:
    SUB         r6,r6,#14
    AND         r8,r9,#0xF                  @wd_rem = wd & 0xF
    CMP         r8,#0                       @Residue check
    BEQ         END_LOOPS                   @No residue, jump to end of function

    CMP         r8,r9                       @if(wd_rem == wd)
    BNE         AU1_MASK_FF_RESIDUE         @jump to else part
    LDRB        r12,[r7]                    @pu1_avail[0]
    VMOV.8      D8[0],r12                   @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
    VMOV.8      D8[1],r12                   @vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
    B           SKIP_AU1_MASK_FF_RESIDUE    @Skip the else part

AU1_MASK_FF_RESIDUE:
    MOV         r12,#-1                     @move -1 to r12
    VMOV.16     D8[0],r12                   @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)

SKIP_AU1_MASK_FF_RESIDUE:
    LDRB        r12,[r7,#1]                 @pu1_avail[1]
    VMOV.8      D8[6],r12                   @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 6)
    VMOV.8      D8[7],r12                   @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 7)

    MOV         r12,r0                      @pu1_src_cpy = pu1_src
    MOV         r4,r10                      @move ht to r4 for loop count

PU1_SRC_LOOP_RESIDUE:
    LDRH        r11,[r2]                    @load pu1_src_left
    VLD1.8      D12,[r12]!                  @pu1_cur_row = vld1q_u8(pu1_src_cpy)
    VLD1.8      D13,[r12],r1                @pu1_cur_row = vld1q_u8(pu1_src_cpy)
    SUB         r12,#8
    SUB         r5,r9,#2                    @wd - 2

    SUB         r14,r10,r4                  @(ht - row)
    VMOV.16     D15[3],r11                  @vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
    LSL         r14,r14,#1                  @(ht - row) * 2

    VLD1.8      D30,[r12]!                  @II pu1_cur_row = vld1q_u8(pu1_src_cpy)
    VLD1.8      D31,[r12]                   @II pu1_cur_row = vld1q_u8(pu1_src_cpy)
    SUB         r12,#8
    VEXT.8      Q7,Q7,Q6,#14                @pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
    SUB         r12,r12,r1

    LDRH        r11,[r2,#2]                 @II load pu1_src_left
    VCGT.U8     Q8,Q6,Q7                    @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    MUL         r14,r14,r1                  @(ht - row) * 2 * src_strd

    VCLT.U8     Q9,Q6,Q7                    @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    VMOV.16     D29[3],r11                  @II vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)

    LDRB        r11,[r12,#16]               @pu1_src_cpy[16]
    VSUB.U8     Q10,Q9,Q8                   @sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    ADD         r5,r14,r5                   @(ht - row) * 2 * src_strd + (wd - 2)

    VMOV.8      D14[0],r11                  @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
    VEXT.8      Q14,Q14,Q15,#14             @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)

    LDRB        r11,[r12,#17]               @pu1_src_cpy[17]
    VCGT.U8     Q13,Q15,Q14                 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    LDRH        r14,[r6, r5]                @pu1_src_org[(ht - row) * 2 * src_strd + (wd - 2)]

    VMOV.8      D14[1],r11                  @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
    VCLT.U8     Q12,Q15,Q14                 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    ADD         r12,r12,r1

    STRH        r14,[r2],#2                 @pu1_src_left[(ht - row) * 2] = au1_src_left_tmp[(ht - row) * 2]
    VEXT.8      Q7,Q6,Q7,#2                 @pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
    LDRB        r11,[r12,#16]               @II pu1_src_cpy[16]

    VCGT.U8     Q8,Q6,Q7                    @vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    VMOV.8      D28[0],r11                  @II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)

    LDRB        r11,[r12,#17]               @II pu1_src_cpy[17]
    VCLT.U8     Q9,Q6,Q7                    @vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    SUB         r4,r4,#1                    @II Decrement row by 1

    VSUB.U8     Q11,Q9,Q8                   @sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    VMOV.8      D28[1],r11                  @II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
    SUB         r12,r12,r1

    VADD.U8     Q7,Q1,Q10                   @edge_idx = vaddq_s8(const_2, sign_left)
    VEXT.8      Q14,Q15,Q14,#2              @II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)

    VADD.U8     Q7,Q7,Q11                   @edge_idx = vaddq_s8(edge_idx, sign_right)

    VSUB.U8     Q10,Q12,Q13                 @II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
    VTBL.8      D14,{D10},D14               @vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    VCGT.U8     Q13,Q15,Q14                 @II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)

    VCLT.U8     Q12,Q15,Q14                 @II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    VTBL.8      D15,{D10},D15               @vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    VSUB.U8     Q11,Q12,Q13                 @II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))

    VAND        Q7,Q7,Q4                    @edge_idx = vandq_s8(edge_idx, au1_mask)
    VUZP.8      D14,D15                     @deinterleave edge_idx into U (D14) and V (D15) lanes

    VADD.U8     Q14,Q1,Q10                  @II edge_idx = vaddq_s8(const_2, sign_left)
    VTBL.8      D16,{D11},D14               @offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
    VADD.U8     Q14,Q14,Q11                 @II edge_idx = vaddq_s8(edge_idx, sign_right)

    VMOVL.U8    Q9,D12                      @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
    VTBL.8      D17,{D0},D15                @offset = vtbl1_s8(offset_tbl_v, vget_high_s8(edge_idx))
    VMOVL.U8    Q12,D30                     @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))

    VZIP.S8     D16,D17                     @re-interleave the U and V offsets
    VTBL.8      D28,{D10},D28               @II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    VADDW.S8    Q9,Q9,D16                   @pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)

    VMAX.S16    Q9,Q9,Q2                    @pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    VTBL.8      D29,{D10},D29               @II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
    VMIN.U16    Q9,Q9,Q3                    @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    VMOVN.I16   D18,Q9                      @vmovn_s16(pi2_tmp_cur_row.val[0])
    VAND        Q14,Q14,Q4                  @II edge_idx = vandq_s8(edge_idx, au1_mask)

    SUB         r5,r9,#2                    @II wd - 2
    VUZP.8      D28,D29                     @II deinterleave edge_idx into U and V lanes
    SUB         r14,r10,r4                  @II (ht - row)

    LSL         r14,r14,#1                  @II (ht - row) * 2
    VTBL.8      D26,{D11},D28               @II offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
    MUL         r14,r14,r1                  @II (ht - row) * 2 * src_strd

    ADD         r5,r14,r5                   @II (ht - row) * 2 * src_strd + (wd - 2)
    VTBL.8      D27,{D0},D29                @II offset = vtbl1_s8(offset_tbl_v, vget_high_s8(edge_idx))
    LDRH        r14,[r6, r5]                @II pu1_src_org[(ht - row) * 2 * src_strd + (wd - 2)]

    VZIP.S8     D26,D27                     @II re-interleave the U and V offsets
    VST1.8      {D18},[r12],r1              @vst1q_u8(pu1_src_cpy, pu1_cur_row)

    STRH        r14,[r2],#2                 @II pu1_src_left[(ht - row) * 2] = au1_src_left_tmp[(ht - row) * 2]
    VADDW.S8    Q12,Q12,D26                 @II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
    SUBS        r4,r4,#1                    @Decrement row by 1

    VMAX.S16    Q12,Q12,Q2                  @II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
    VMIN.U16    Q12,Q12,Q3                  @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))

    VMOVN.I16   D28,Q12                     @II vmovn_s16(pi2_tmp_cur_row.val[0])

    VST1.8      {D28},[r12],r1              @II vst1q_u8(pu1_src_cpy, pu1_cur_row)

    BNE         PU1_SRC_LOOP_RESIDUE        @If not equal jump to the pu1_src loop

END_LOOPS:
    LDMFD       sp!,{r4-r12,r15}            @Reload the registers from SP and return