@ From the src_gcc directory (code-browser navigation header removed so the file assembles).
      1 /*
      2  * Copyright (C) 2007-2008 ARM Limited
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  *
     16  */
     17 /*
     18  *
     19  */
     20 
     21     .eabi_attribute 24, 1
     22     .eabi_attribute 25, 1
     23 
     24     .arm
     25     .fpu neon
     26     .text
     27 
     28     .global armVCM4P10_DeblockingChromabSLT4_unsafe
     29     .func   armVCM4P10_DeblockingChromabSLT4_unsafe
     30 armVCM4P10_DeblockingChromabSLT4_unsafe:
     31     VLD1.32  {d18[0]},[r5]!
     32     VSUBL.U8 q11,d5,d9
     33     VMOV     d28,d18
     34     VSUBL.U8 q10,d8,d4
     35     VSHR.S16 q11,q11,#2
     36     VZIP.8   d18,d28
     37     VBIF     d18,d14,d16
     38     VRHADD.S16 q10,q11,q10
     39     VADD.I8  d31,d18,d15
     40     VQMOVN.S16 d20,q10
     41     VLD1.8   {d0[]},[r2]
     42     VMIN.S8  d20,d20,d31
     43     VNEG.S8  d31,d31
     44     VLD1.8   {d2[]},[r3]
     45     VMAX.S8  d20,d20,d31
     46     VMOVL.U8 q14,d4
     47     VMOVL.U8 q12,d8
     48     VADDW.S8 q14,q14,d20
     49     VSUBW.S8 q12,q12,d20
     50     VQMOVUN.S16 d29,q14
     51     VQMOVUN.S16 d24,q12
     52     BX       lr
     53     .endfunc
     54 
     55     .global armVCM4P10_DeblockingChromabSGE4_unsafe
     56     .func   armVCM4P10_DeblockingChromabSGE4_unsafe
     57 armVCM4P10_DeblockingChromabSGE4_unsafe:
     58     VHADD.U8 d13,d4,d9
     59     VHADD.U8 d31,d8,d5
     60     VLD1.8   {d0[]},[r2]
     61     ADD      r5,r5,#4
     62     VLD1.8   {d2[]},[r3]
     63     VRHADD.U8 d13,d13,d5
     64     VRHADD.U8 d31,d31,d9
     65     BX       lr
     66     .endfunc
     67 
     68     .end
     69 
     70