/external/compiler-rt/lib/builtins/arm/

gtdf2vfp.S
    22  vmov d6, r0, r1    // load r0/r1 pair in double register
    23  vmov d7, r2, r3    // load r2/r3 pair in double register

gtsf2vfp.S
    22  vmov s14, r0       // move from GPR 0 to float register
    23  vmov s15, r1       // move from GPR 1 to float register

ledf2vfp.S
    22  vmov d6, r0, r1    // load r0/r1 pair in double register
    23  vmov d7, r2, r3    // load r2/r3 pair in double register

lesf2vfp.S
    22  vmov s14, r0       // move from GPR 0 to float register
    23  vmov s15, r1       // move from GPR 1 to float register

ltdf2vfp.S
    22  vmov d6, r0, r1    // load r0/r1 pair in double register
    23  vmov d7, r2, r3    // load r2/r3 pair in double register

ltsf2vfp.S
    22  vmov s14, r0       // move from GPR 0 to float register
    23  vmov s15, r1       // move from GPR 1 to float register

nedf2vfp.S
    22  vmov d6, r0, r1    // load r0/r1 pair in double register
    23  vmov d7, r2, r3    // load r2/r3 pair in double register

nesf2vfp.S
    22  vmov s14, r0       // move from GPR 0 to float register
    23  vmov s15, r1       // move from GPR 1 to float register

truncdfsf2vfp.S
    22  vmov d7, r0, r1    // load double from r0/r1 pair
    24  vmov r0, s15       // return result in r0

unorddf2vfp.S
    22  vmov d6, r0, r1    // load r0/r1 pair in double register
    23  vmov d7, r2, r3    // load r2/r3 pair in double register

unordsf2vfp.S
    22  vmov s14, r0       // move from GPR 0 to float register
    23  vmov s15, r1       // move from GPR 1 to float register
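The vmov lines flagged in each of these shims are the argument-marshalling half of a VFP comparison helper: the AAPCS soft-float arguments arrive in core registers and are moved into VFP registers without touching memory. A minimal sketch of how such a double-precision shim typically continues (the vcmp/vmrs/conditional-move tail and the label are assumptions, not quoted from the files above; ARM mode is assumed so the conditional movs need no IT block):

        .syntax unified
        .p2align 2
    example_gtdf2vfp_sketch:              // hypothetical label, not the real symbol
        vmov     d6, r0, r1               // load r0/r1 pair in double register
        vmov     d7, r2, r3               // load r2/r3 pair in double register
        vcmp.f64 d6, d7                   // compare the two doubles
        vmrs     apsr_nzcv, fpscr         // copy FP status flags into the core flags
        movgt    r0, #1                   // result = 1 if a > b
        movle    r0, #0                   // result = 0 otherwise
        bx       lr

The vmov Dd, Rt, Rt2 form is what makes this cheap: a 64-bit argument split across an r0/r1 (or r2/r3) pair lands in a single d register in one instruction.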
/external/llvm/test/CodeGen/ARM/

twoaddrinstr.ll
    10  ; CHECK-NEXT: vmov.f32
    11  ; CHECK-NEXT: vmov.f32

vmov.ll
     5  ;CHECK: vmov.i8 d{{.*}}, #0x8
    11  ;CHECK: vmov.i16 d{{.*}}, #0x10
    17  ;CHECK: vmov.i16 d{{.*}}, #0x1000
    35  ;CHECK: vmov.i32 d{{.*}}, #0x20
    41  ;CHECK: vmov.i32 d{{.*}}, #0x2000
    47  ;CHECK: vmov.i32 d{{.*}}, #0x200000
    53  ;CHECK: vmov.i32 d{{.*}}, #0x20000000
    59  ;CHECK: vmov.i32 d{{.*}}, #0x20ff
    65  ;CHECK: vmov.i32 d{{.*}}, #0x20ffff
   107  ;CHECK: vmov.i64 d{{.*}}, #0xff0000ff0000fff [all...]
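The vmov.ll checks above enumerate immediates that NEON can materialise with a single vector-move-immediate: a byte splat, a shifted byte inside a halfword or word, the "byte followed by ones" word forms, and a 64-bit value whose bytes are each 0x00 or 0xff. A sketch of those encodable shapes (register choices and the i64 value are illustrative, not taken from the test):

        vmov.i8   d0, #0x8                  // every byte of d0 becomes 0x08
        vmov.i16  d1, #0x1000               // every halfword becomes 0x1000
        vmov.i32  d2, #0x20000000           // one non-zero byte per 32-bit lane
        vmov.i32  d3, #0x20ff               // "byte, then ones" form: 0x000020ff per lane
        vmov.i64  d4, #0xff00ff00ff00ff00   // each byte either 0x00 or 0xff

Constants outside these patterns cannot be a single vmov and fall back to a literal load or a movw/movt plus vdup.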
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/

convolve_neon.s
    46  VMOV.S32 Q10, #0
    63  VMOV.S32 r5, D20[0]
    84  VMOV.S32 Q10, #0
    99  VMOV.S32 r5, D20[0]
   122  VMOV.S32 Q10, #0
   138  VMOV.S32 r5, D20[0]
   148  VMOV.S32 Q10, #0
   163  VMOV.S32 r5, D20[0]
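Each VMOV.S32 Q10, #0 in convolve_neon.s is paired with a later VMOV.S32 r5, D20[0]: clear a vector accumulator, run the multiply-accumulates, then pull the finished scalar back into a core register. A sketch of that idiom; the multiply-accumulate and reduction steps in the middle are assumed, not quoted from the file:

        VMOV.S32  Q10, #0            // clear the 4x32-bit accumulator (as in the listing)
        VMLAL.S16 Q10, D0, D2        // hypothetical widening multiply-accumulate tap
        VMLAL.S16 Q10, D1, D3        // ... further taps would follow
        VADD.I32  D20, D20, D21      // fold the upper half of Q10 into the lower half
        VPADD.I32 D20, D20, D20      // pairwise add: lane 0 now holds the full sum
        VMOV.S32  r5, D20[0]         // extract the scalar result into a GPR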
/external/libvpx/libvpx/third_party/libyuv/source/

row_neon.cc
    39  "vmov.u8 d3, d2 \n" \
    57  "vmov.u8 d2, #128 \n"
    65  "vmov.u8 d3, d2 \n"  /* split odd/even uv apart */ \
    75  "vmov.u8 d3, d2 \n"  /* split odd/even uv apart */ \
    83  "vmov.u8 d3, d2 \n" \
    91  "vmov.u8 d0, d3 \n" \
    92  "vmov.u8 d3, d2 \n" \
   182  "vmov.u8 d23, #255 \n"
   212  "vmov.u8 d23, #255 \n"
   242  "vmov.u8 d23, #255 \n [all...]
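In row_neon.cc these vmov.u8 lines live inside GCC inline-asm string literals in the row converters; they splat small constants and duplicate d registers before the YUV-to-ARGB arithmetic. The same instructions outside their C string wrappers, with roles inferred from the listing's own comments (so treat the comments here as assumptions):

        vmov.u8   d2, #128           // neutral chroma value used when no real U/V plane exists
        vmov.u8   d3, d2             // duplicate so U and V halves see the same data
        vmov.u8   d23, #255          // fully opaque alpha byte for the ARGB output rows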
/external/libhevc/common/arm/

ihevc_intra_pred_luma_dc.s
   118  vmov d17, r11, r9
   155  vmov d6, r4, r5                        @store nt to accumulate
   193  vmov d28, r14, r5                      @src[2nt+1]+2+src[2nt-1] moved to d28
   240  vmov.i64 d19, #0x00000000000000ff      @
   251  vmov.i64 d20, #0x00000000000000ff      @byte mask row 1 (prol)
   257  vmov.i64 d21, #0x00000000000000ff      @byte mask row 2 (prol)
   266  vmov.i64 d20, #0x00000000000000ff      @byte mask row 3 (prol)
   274  vmov.i64 d21, #0x00000000000000ff      @byte mask row 4 (prol)
   282  vmov.i64 d20, #0x00000000000000ff      @byte mask row 5 (prol)
   290  vmov.i64 d21, #0x00000000000000ff      @byte mask row 6 (prol [all...]

ihevc_sao_edge_offset_class0_chroma.s
    82  VMOV.I8 Q1,#2          @const_2 = vdupq_n_s8(2)
    86  VMOV.I16 Q2,#0         @const_min_clip = vdupq_n_s16(0)
    90  VMOV.I16 Q3,#255       @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    96  VMOV.S8 Q4,#0xFF       @au1_mask = vdupq_n_s8(-1)
   126  VMOV.8 D8[0],r12       @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
   127  VMOV.8 D8[1],r12       @vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
   132  VMOV.16 D8[0],r12      @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
   138  VMOV.8 D9[6],r12       @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
   139  VMOV.8 D9[7],r12       @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
   153  VMOV.16 D15[3],r11     @vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15 [all...]

ihevc_sao_edge_offset_class0.s
    78  VMOV.I8 Q1,#2          @const_2 = vdupq_n_s8(2)
    82  VMOV.I16 Q2,#0         @const_min_clip = vdupq_n_s16(0)
    86  VMOV.I16 Q3,#255       @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1)
    92  VMOV.S8 Q4,#0xFF       @au1_mask = vdupq_n_s8(-1)
   122  VMOV.8 D8[0],r12       @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
   127  VMOV.8 D8[0],r12       @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
   133  VMOV.8 D9[7],r12       @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
   147  VMOV.8 D15[7],r11      @vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
   161  VMOV.8 D29[7],r11      @II Iteration vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
   173  VMOV.8 D14[0],r11      @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_t (…) [all...]
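Two vmov shapes carry these HEVC kernels: a 64-bit immediate whose bytes are each 0x00 or 0xff, used as a per-byte mask, and lane inserts from a core register, which are the assembly form of the vsetq_lane intrinsics named in the @-comments. A short fragment restating those shapes; the comments summarise the listing's own annotations rather than quoting the files:

        vmov.i64  d19, #0x00000000000000ff   // mask selecting only byte 0 of a row
        vmov.8    d8[0], r12                 // insert an availability flag into mask lane 0
        vmov.8    d9[7], r12                 // ... and into the last lane of the 16-byte mask
        vmov.16   d15[3], r11                // insert a 16-bit left-column sample into a lane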
/external/libpng/arm/

filter_neon.S
    67  vmov.i8 d3, #0
    83  vmov.i8 d3, #0
   122  vmov.i8 d3, #0
   144  vmov.i8 d3, #0
   194  vmov.i8 d3, #0
   195  vmov.i8 d20, #0
   206  vmov d20, d19
   218  vmov.i8 d3, #0
   219  vmov.i8 d4, #0
   243  vmov d4, d1 [all...]
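The PNG unfiltering loops above zero their "previous pixel" registers at the start of each row, then carry reconstructed pixels forward with plain register-to-register vmov copies instead of reloading them. A sketch of that row-loop skeleton; the filter arithmetic between the moves is elided and the pairing of the copies is an assumption, not quoted:

        vmov.i8   d3,  #0        // previous pixel (a) is zero at the start of a row
        vmov.i8   d20, #0        // previous row's previous pixel (c), Paeth filter only
        // ... per-pixel filter arithmetic elided ...
        vmov      d20, d19       // c = b: slide the above-row window one pixel right
        vmov      d4,  d1        // a = current: the reconstructed pixel becomes "previous"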
/external/libvpx/libvpx/vpx_dsp/arm/

vpx_convolve8_avg_neon_asm.asm
    94  vmov d23, d21
   161  vmov q8, q9
   162  vmov d20, d23
   163  vmov q11, q12
   164  vmov q9, q13
   274  vmov q8, q10
   275  vmov d18, d22
   276  vmov d19, d24
   277  vmov q10, q13
   278  vmov d22, d2 [all...]

vpx_convolve8_neon_asm.asm
    94  vmov d23, d21
   150  vmov q8, q9
   151  vmov d20, d23
   152  vmov q11, q12
   153  vmov q9, q13
   252  vmov q8, q10
   253  vmov d18, d22
   254  vmov d19, d24
   255  vmov q10, q13
   256  vmov d22, d2 [all...]
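Both convolve8 kernels end each iteration with a block of q- and d-register vmov copies: instead of reloading source rows, the already-loaded rows are rotated down the register file so the next iteration finds them in the slots its 8-tap filter expects. The copies as they appear above, with bookkeeping comments that are an interpretation rather than the files' own:

        vmov      q8,  q9        // slide previously loaded rows toward the low registers
        vmov      d20, d23       // keep the one half-row still needed by the next taps
        vmov      q11, q12
        vmov      q9,  q13       // newest rows drop into the positions just vacated

Only the freshly needed rows are then loaded from memory, which keeps the inner loop's load count constant regardless of the filter length.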
/external/opencv3/3rdparty/libpng/arm/

filter_neon.S
    42  vmov.i8 d3, #0
    58  vmov.i8 d3, #0
    97  vmov.i8 d3, #0
   119  vmov.i8 d3, #0
   169  vmov.i8 d3, #0
   170  vmov.i8 d20, #0
   181  vmov d20, d19
   193  vmov.i8 d3, #0
   194  vmov.i8 d4, #0
   218  vmov d4, d1 [all...]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/

vp9_convolve8_avg_neon.asm
   100  vmov d23, d21
   167  vmov q8, q9
   168  vmov d20, d23
   169  vmov q11, q12
   170  vmov q9, q13
   284  vmov q8, q10
   285  vmov d18, d22
   286  vmov d19, d24
   287  vmov q10, q13
   288  vmov d22, d2 [all...]

vp9_convolve8_neon.asm
   100  vmov d23, d21
   156  vmov q8, q9
   157  vmov d20, d23
   158  vmov q11, q12
   159  vmov q9, q13
   262  vmov q8, q10
   263  vmov d18, d22
   264  vmov d19, d24
   265  vmov q10, q13
   266  vmov d22, d2 [all...]
/external/libyuv/files/source/

row_neon.cc
    39  "vmov.u8 d3, d2 \n" \
    57  "vmov.u8 d2, #128 \n"
    65  "vmov.u8 d3, d2 \n"  /* split odd/even uv apart */ \
    75  "vmov.u8 d3, d2 \n"  /* split odd/even uv apart */ \
    83  "vmov.u8 d3, d2 \n" \
    91  "vmov.u8 d0, d3 \n" \
    92  "vmov.u8 d3, d2 \n" \
   145  "vmov.u8 d23, #255 \n"
   175  "vmov.u8 d23, #255 \n"
   238  "vmov.u8 d23, #255 \n [all...]