    Searched refs:v18 (Results 1 - 25 of 166)


  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/mips/
sb1-ext-mdmx.s 23 add.ob $v1, $v12, $v18
24 add.ob $v1, $v12, $v18[6]
27 adda.ob $v12, $v18
28 adda.ob $v12, $v18[6]
31 addl.ob $v12, $v18
32 addl.ob $v12, $v18[6]
34 alni.ob $v1, $v12, $v18, 6
36 alnv.ob $v1, $v12, $v18, $21
39 and.ob $v1, $v12, $v18
40 and.ob $v1, $v12, $v18[6]
    [all...]
mips64-mdmx.s 18 add.ob $v1, $v12, $v18
19 add.ob $v1, $v12, $v18[6]
22 add.qh $v1, $v12, $v18
23 add.qh $v1, $v12, $v18[2]
26 adda.ob $v12, $v18
27 adda.ob $v12, $v18[6]
30 adda.qh $v12, $v18
31 adda.qh $v12, $v18[2]
34 addl.ob $v12, $v18
35 addl.ob $v12, $v18[6]
    [all...]
sb1-ext-mdmx.d 15 0+0014 <[^>]*> 7ad2604b add\.ob \$v1,\$v12,\$v18
16 0+0018 <[^>]*> 7992604b add\.ob \$v1,\$v12,\$v18\[6\]
18 0+0020 <[^>]*> 7ad26037 adda\.ob \$v12,\$v18
19 0+0024 <[^>]*> 79926037 adda\.ob \$v12,\$v18\[6\]
21 0+002c <[^>]*> 7ad26437 addl\.ob \$v12,\$v18
22 0+0030 <[^>]*> 79926437 addl\.ob \$v12,\$v18\[6\]
23 0+0034 <[^>]*> 78d26058 alni\.ob \$v1,\$v12,\$v18,6
24 0+0038 <[^>]*> 7ab26059 alnv\.ob \$v1,\$v12,\$v18,s5
26 0+0040 <[^>]*> 7ad2604c and\.ob \$v1,\$v12,\$v18
27 0+0044 <[^>]*> 7992604c and\.ob \$v1,\$v12,\$v18\[6\]
    [all...]
mips64-mdmx.d 15 0+0014 <[^>]*> 7ad2604b add\.ob \$v1,\$v12,\$v18
16 0+0018 <[^>]*> 7992604b add\.ob \$v1,\$v12,\$v18\[6\]
18 0+0020 <[^>]*> 7ab2604b add\.qh \$v1,\$v12,\$v18
19 0+0024 <[^>]*> 7932604b add\.qh \$v1,\$v12,\$v18\[2\]
21 0+002c <[^>]*> 7ad26037 adda\.ob \$v12,\$v18
22 0+0030 <[^>]*> 79926037 adda\.ob \$v12,\$v18\[6\]
24 0+0038 <[^>]*> 7ab26037 adda\.qh \$v12,\$v18
25 0+003c <[^>]*> 79326037 adda\.qh \$v12,\$v18\[2\]
27 0+0044 <[^>]*> 7ad26437 addl\.ob \$v12,\$v18
28 0+0048 <[^>]*> 79926437 addl\.ob \$v12,\$v18\[6\]
    [all...]
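
The MDMX tests above exercise both vector formats: .ob treats a 64-bit register as eight bytes, .qh as four halfwords, and the bracketed operand ($v18[6], $v18[2]) selects one element of the second source and broadcasts it to every lane. A rough C model of the element-select form of add.ob; the function name is illustrative, and MDMX's exact overflow behaviour (wrap vs. saturate) is not claimed here:

    #include <stdint.h>

    /* add.ob $vd, $vs, $vt[sel]: eight byte lanes, with lane `sel` of vt
     * broadcast to all lanes. Wrapping add shown; saturation unverified. */
    static void mdmx_add_ob_elem(uint8_t vd[8], const uint8_t vs[8],
                                 const uint8_t vt[8], int sel) {
        for (int i = 0; i < 8; i++)
            vd[i] = (uint8_t)(vs[i] + vt[sel]);
    }
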
  /external/boringssl/linux-aarch64/crypto/modes/
ghashv8-armx64.S 16 ushr v18.2d,v19.2d,#63
18 ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
19 ushr v18.2d,v3.2d,#63
21 and v18.16b,v18.16b,v16.16b
23 ext v18.16b,v18.16b,v18.16b,#8
25 orr v3.16b,v3.16b,v18.16b //H<<<=1
37 eor v18.16b,v0.16b,v2.16b
    [all...]
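
The ghashv8-armx64.S match is the GHASH key setup: the ushr #63 / orr pairs implement a 128-bit left shift across two 64-bit lanes ("H<<=1"), and the and with t0 (the 0xc2....01 constant) folds the GF(2^128) reduction polynomial back in when the shifted-out bit was set. A minimal C sketch of just the cross-lane shift idiom, assuming H is held as two 64-bit halves (the u128 type is illustrative):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;

    /* 128-bit logical left shift by one: the bit leaving the low half
     * (lo >> 63) is OR-ed into the high half, which is what the vector
     * ushr #63 / shl / orr sequence does lane-wise. */
    static u128 shl1_128(u128 h) {
        u128 r = { h.lo << 1, (h.hi << 1) | (h.lo >> 63) };
        return r;
    }
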
  /external/libavc/common/armv8/
ih264_inter_pred_luma_horz_qpel_vert_qpel_av8.s 152 ld1 {v18.2s, v19.2s}, [x6], x2 // horz row0, col 0
158 ext v23.8b, v18.8b , v19.8b , #5
159 ext v20.8b, v18.8b , v19.8b , #2
160 ext v21.8b, v18.8b , v19.8b , #3
161 ext v22.8b, v18.8b , v19.8b , #4
162 ext v19.8b, v18.8b , v19.8b , #1
164 uaddl v28.8h, v18.8b, v23.8b
169 ld1 {v18.2s, v19.2s}, [x11], x2 // horz row 0, col 1
176 ext v23.8b, v18.8b , v19.8b , #5
177 ext v20.8b, v18.8b , v19.8b , #
    [all...]
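
Throughout these libavc horizontal filters, ext carves shifted byte windows out of one wide load: ext v23.8b, v18.8b, v19.8b, #5 yields a vector whose lane i is src[i + 5], so a single 16-byte load feeds all six taps of the H.264 filter. A scalar model of the 8-byte EXT operation itself:

    #include <stdint.h>

    /* EXT Vd.8b, Vn.8b, Vm.8b, #k: bytes of the concatenated pair (n, m)
     * starting at offset k, i.e. d[i] == (n:m)[i + k]. */
    static void ext8(uint8_t d[8], const uint8_t n[8],
                     const uint8_t m[8], int k) {
        for (int i = 0; i < 8; i++)
            d[i] = (i + k < 8) ? n[i + k] : m[i + k - 8];
    }
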
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s 111 uaddl v18.8h, v0.8b, v10.8b
113 mla v18.8h, v20.8h , v28.8h
119 mls v18.8h, v22.8h , v30.8h
126 ext v24.16b, v18.16b , v20.16b , #4
127 ext v26.16b, v18.16b , v20.16b , #6
129 ext v23.16b, v18.16b , v20.16b , #10
131 ext v24.16b, v18.16b , v20.16b , #2
132 ext v26.16b, v18.16b , v20.16b , #8
135 saddl v26.4s, v18.4h, v23.4h
139 saddl2 v23.4s, v18.8h, v23.8h
    [all...]
ih264_inter_pred_luma_horz_qpel_vert_hpel_av8.s 173 uaddl v18.8h, v0.8b, v10.8b
175 mla v18.8h, v20.8h , v28.8h
181 mls v18.8h, v22.8h , v30.8h
187 st1 {v18.4s }, [x9], #16
189 ext v24.16b, v18.16b , v20.16b , #4
190 ext v26.16b, v18.16b , v20.16b , #6
192 ext v22.16b, v18.16b , v20.16b , #10
194 ext v24.16b, v18.16b , v20.16b , #2
195 ext v26.16b, v18.16b , v20.16b , #8
198 saddl v26.4s, v18.4h, v22.4h
    [all...]
ih264_inter_pred_luma_horz_hpel_vert_qpel_av8.s 237 uaddl v18.8h, v2.8b, v3.8b
239 mla v16.8h, v18.8h , v22.8h
242 uaddl v18.8h, v1.8b, v4.8b
244 mls v16.8h, v18.8h , v24.8h
253 saddl v18.4s, v6.4h, v16.4h
261 smlal v18.4s, v30.4h, v22.4h
262 smlsl v18.4s, v28.4h, v24.4h
268 sqrshrun v18.4h, v18.4s, #10
276 uqxtn v18.8b, v18.8h
    [all...]
ih264_inter_pred_filters_luma_vert_av8.s 140 uaddl v18.8h, v5.8b, v7.8b // temp3 = src[2_8] + src[3_8]
141 mla v20.8h, v18.8h , v22.8h // temp4 += temp3 * 20
147 uaddl v18.8h, v4.8b, v10.8b
156 mls v16.8h, v18.8h , v24.8h
158 uaddl v18.8h, v4.8b, v2.8b
162 mla v18.8h, v12.8h , v22.8h
170 mls v18.8h, v20.8h , v24.8h
180 sqrshrun v30.8b, v18.8h, #5
181 uaddl v18.8h, v7.8b, v5.8b
183 mla v18.8h, v12.8h , v22.8h
    [all...]
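
The comments in ih264_inter_pred_filters_luma_vert_av8.s spell out the standard H.264 six-tap half-pel filter: symmetric tap pairs are summed first (temp3 = src[2] + src[3]), the inner pair is weighted by 20 (mla), the next pair by 5 and subtracted (mls), and sqrshrun #5 performs the rounding right shift with unsigned saturation. The same computation per output sample, as a scalar sketch:

    #include <stdint.h>

    static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* (A - 5B + 20C + 20D - 5E + F + 16) >> 5, matching the
     * uaddl / mla(*20) / mls(*5) / sqrshrun(#5) sequence above. */
    static uint8_t hpel6_vert(const uint8_t *s, int stride) {
        int acc = (s[2 * stride] + s[3 * stride]) * 20
                - (s[1 * stride] + s[4 * stride]) * 5
                + (s[0]          + s[5 * stride]);
        return clip_u8((acc + 16) >> 5);
    }
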
ih264_inter_pred_luma_vert_qpel_av8.s 147 uaddl v18.8h, v5.8b, v7.8b // temp3 = src[2_8] + src[3_8]
148 mla v20.8h, v18.8h , v22.8h // temp4 += temp3 * 20
154 uaddl v18.8h, v4.8b, v10.8b
163 mls v16.8h, v18.8h , v24.8h
168 uaddl v18.8h, v4.8b, v2.8b
171 mla v18.8h, v12.8h , v22.8h
179 mls v18.8h, v20.8h , v24.8h
191 sqrshrun v30.8b, v18.8h, #5
192 uaddl v18.8h, v7.8b, v5.8b
194 mla v18.8h, v12.8h , v22.8h
    [all...]
  /external/libmpeg2/common/armv8/
ideint_spatial_filter_av8.s 69 movi v18.8h, #0
112 uabal v18.8h, v1.8b, v5.8b
126 addp v18.8h, v18.8h, v18.8h
130 uaddlp v18.2s, v18.4h
137 mul v18.2s, v18.2s, v31.2s
148 smov x6, v18.2s[0]
    [all...]
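
The ideint_spatial_filter_av8.s lines accumulate absolute differences between two rows (uabal), then collapse the vector to a scalar: addp folds adjacent halfword pairs, uaddlp widens and folds again, and smov extracts the result. The accumulate-and-reduce pattern in scalar form (only these matched lines are visible, so the surrounding use is not modelled):

    #include <stdint.h>

    /* Sum of absolute differences over 8 byte lanes: the uabal
     * accumulation plus the addp/uaddlp reduction shown above. */
    static uint32_t sad8(const uint8_t a[8], const uint8_t b[8]) {
        uint32_t acc = 0;
        for (int i = 0; i < 8; i++) {
            int d = (int)a[i] - (int)b[i];
            acc += (uint32_t)(d < 0 ? -d : d);
        }
        return acc;
    }
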
  /external/libhevc/common/arm64/
ihevc_sao_edge_offset_class3_chroma.s 351 movi v18.16b, #0
382 movi v18.16b, #0 //I
392 mov v18.h[7], w5 //I vsetq_lane_u8
396 EXT v18.16b, v18.16b , v16.16b,#14 //I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14)
429 cmhi v20.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
431 cmhi v22.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
434 ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
435 ADD v18.16b, v18.16b , v22.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
    [all...]
ihevc_weighted_pred_bi.s 251 smull v18.4s, v2.4h, v7.h[0] //vmull_n_s16(pi2_src1_val2, (int16_t) wgt0) iv iteration
263 add v18.4s, v18.4s , v20.4s //vaddq_s32(i4_tmp2_t1, i4_tmp2_t2) iv iteration
266 add v18.4s, v18.4s , v30.4s //vaddq_s32(i4_tmp2_t1, tmp_lvl_shift_t) iv iteration
271 sshl v18.4s,v18.4s,v28.4s
279 sqxtun v18.4h, v18.4s //vqmovun_s32(sto_res_tmp1) iv iteration
280 //mov v19, v18 //vcombine_u16(sto_res_tmp2, sto_res_tmp2)
    [all...]
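
ihevc_weighted_pred_bi.s follows the bi-prediction formula its comments name: each source sample is multiplied by its weight (smull), the two products are added together with a combined rounding/level-shift term (tmp_lvl_shift_t), sshl with a negative count shifts the sum down, and sqxtun narrows with unsigned saturation. Per sample, roughly (the exact packing of rounding and offsets in the file is not visible in the match):

    #include <stdint.h>

    static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* Bi-predictive weighting: the smull/add/sshl/sqxtun chain above. */
    static uint8_t wpred_bi(int16_t p0, int16_t p1, int w0, int w1,
                            int rnd, int shift) {
        int acc = p0 * w0 + p1 * w1 + rnd;   /* products + lvl-shift term */
        return clip_u8(acc >> shift);        /* shift down, saturate to u8 */
    }
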
ihevc_sao_edge_offset_class2.s 284 movi v18.16b, #0
292 mov v18.b[0], w5 //I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
294 EXT v18.16b, v16.16b , v18.16b,#1 //I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 1)
313 cmhi v3.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
316 cmhi v18.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
317 SUB v3.16b, v18.16b , v3.16b //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
320 TBL v18.16b, {v6.16b},v24.16b //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
323 AND v18.16b, v18.16b , v1.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)
    [all...]
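
All of the SAO edge-offset kernels in this directory compute the classifier their comments describe: two unsigned compares (cmhi in both directions) and a subtract give sign(cur - neighbour) as 0 or ±1, edge_idx = 2 + sign_up + sign_down, and TBL maps the 0..4 index through a 5-entry table, AND-masked at tile boundaries (au1_mask). The scalar equivalent:

    /* SAO edge classifier: the cmhi/SUB/ADD/TBL sequence above. */
    static int sign3(int a, int b) { return (a > b) - (a < b); }  /* -1/0/+1 */

    static int sao_edge_class(int cur, int nbr0, int nbr1,
                              const signed char edge_idx_tbl[5]) {
        int edge_idx = 2 + sign3(cur, nbr0) + sign3(cur, nbr1);
        return edge_idx_tbl[edge_idx];   /* the TBL lookup */
    }
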
ihevc_sao_edge_offset_class3.s 290 movi v18.16b, #0
305 mov v18.b[15], w8 //I vsetq_lane_u8
308 EXT v18.16b, v18.16b , v16.16b,#15 //I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 15)
326 cmhi v3.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
327 cmhi v18.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
328 SUB v3.16b, v18.16b , v3.16b //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
330 ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
331 ADD v18.16b, v18.16b , v3.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
    [all...]
ihevc_sao_edge_offset_class0.s 162 cmhi v18.16b, v21.16b , v17.16b //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
165 SUB v20.16b, v18.16b , v16.16b //sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
188 cmhi v18.16b, v21.16b , v17.16b //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
192 SUB v22.16b, v18.16b , v16.16b //sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
199 Uxtl v18.8h, v17.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
216 SADDW v18.8h, v18.8h , v16.8b
218 SMAX v18.8h, v18.8h , v4.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
221 UMIN v18.8h, v18.8h , v6.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u (…)
    [all...]
ihevc_sao_edge_offset_class2_chroma.s 399 movi v18.16b, #0
403 mov v18.h[0], w5 //I pu1_next_row_tmp = vsetq_lane_u8(pu1_src_cpy[src_strd + 16], pu1_next_row_tmp, 0)
407 EXT v18.16b, v16.16b , v18.16b,#2 //I pu1_next_row_tmp = vextq_u8(pu1_next_row, pu1_next_row_tmp, 2)
443 cmhi v20.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
445 cmhi v22.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
448 ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
449 ADD v18.16b, v18.16b , v22.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
451 TBL v18.16b, {v30.16b},v18.16b //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
    [all...]
ihevc_intra_pred_chroma_horz.s 120 ld1 { v18.8h},[x12] //load 16 values. d1[7] will have the 1st value.
149 dup v2.8h, v18.h[7]
153 dup v4.8h, v18.h[6]
157 dup v6.8h, v18.h[5]
161 dup v1.8h, v18.h[4]
165 dup v2.8h, v18.h[3]
169 dup v4.8h, v18.h[2]
173 dup v6.8h, v18.h[1]
178 dup v1.8h, v18.h[0]
206 dup v18.8h, v0.h[7]
    [all...]
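
ihevc_intra_pred_chroma_horz.s implements horizontal intra prediction by replication: one ld1 pulls 16 reference values, and each dup v2.8h, v18.h[k] fills a whole row with the single left-neighbour sample for that row (a halfword per sample here, since chroma carries interleaved Cb/Cr pairs). A scalar sketch:

    #include <stdint.h>

    /* Horizontal intra prediction: row r is a copy of left[r], which is
     * what the per-row dup of v18.h[k] achieves above. */
    static void intra_pred_horz(uint16_t *dst, int stride,
                                const uint16_t *left, int rows, int cols) {
        for (int r = 0; r < rows; r++)
            for (int c = 0; c < cols; c++)
                dst[r * stride + c] = left[r];
    }
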
ihevc_sao_edge_offset_class1.s 145 LD1 {v18.16b},[x10] //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
148 cmhi v5.16b, v3.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
151 cmhi v17.16b, v18.16b , v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
155 Uxtl v26.8h, v18.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
158 Uxtl2 v28.8h, v18.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
161 cmhi v22.16b, v18.16b , v30.16b //II vcgtq_u8(pu1_cur_row, pu1_top_row)
165 cmhi v24.16b, v30.16b , v18.16b //II vcltq_u8(pu1_cur_row, pu1_top_row)
222 LD1 {v18.16b},[x10] //pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
223 cmhi v5.16b, v3.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
224 cmhi v17.16b, v18.16b , v3.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
    [all...]
ihevc_itrans_recon_8x8.s 189 smull v18.4s, v3.4h, v1.h[2] //// y2 * sin2 (q3 is freed by this time)(part of d1)
239 smlsl v18.4s, v11.4h, v0.h[2] //// d1 = y2 * sin2 - y6 * cos2(part of a0 and a1)
252 sub v22.4s, v20.4s , v18.4s //// a2 = c1 - d1(part of x2,x5)
253 add v18.4s, v20.4s , v18.4s //// a1 = c1 + d1(part of x1,x6)
261 add v28.4s, v18.4s , v26.4s //// a1 + b1(part of x1)
262 sub v18.4s, v18.4s , v26.4s //// a1 - b1(part of x6)
272 sqrshrn v11.4h, v18.4s,#shift_stage1_idct //// x6 = (a1 - b1 + rnd) >> 7(shift_stage1_idct)
314 smull v18.4s, v3.4h, v1.h[2] //// y2 * sin2 (q3 is freed by this time)(part of d1)
    [all...]
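
ihevc_itrans_recon_8x8.s annotates a textbook even/odd IDCT butterfly: partial sums combine as a1 = c1 + d1 and a2 = c1 - d1, each a term then meets its odd-part counterpart b, and sqrshrn applies the stage-1 rounding shift. One butterfly in scalar form:

    /* a1 = c1 + d1; x1/x6 = (a1 +/- b1 + rnd) >> shift, as the comments
     * above state for shift_stage1_idct. */
    static void idct_butterfly(int c1, int d1, int b1, int shift,
                               int *x1, int *x6) {
        int a1 = c1 + d1;
        int rnd = 1 << (shift - 1);
        *x1 = (a1 + b1 + rnd) >> shift;
        *x6 = (a1 - b1 + rnd) >> shift;
    }
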
  /external/boringssl/linux-aarch64/crypto/aes/
aesv8-armx64.S 300 ld1 {v18.4s,v19.4s},[x7],#32
356 aese v0.16b,v18.16b
395 aese v0.16b,v18.16b
415 ld1 {v18.16b},[x0],#16
420 orr v19.16b,v18.16b,v18.16b
423 orr v1.16b,v18.16b,v18.16b
424 ld1 {v18.16b},[x0],#16
427 orr v19.16b,v18.16b,v18.16b
    [all...]
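
In aesv8-armx64.S, ld1 loads round keys two at a time and each aese applies one round to the state: by the ARMv8 definition, AESE is AddRoundKey (XOR with the round key) followed by SubBytes and ShiftRows, with MixColumns provided by the separate aesmc instruction (not among the matched lines). A skeleton of the per-round structure; the helper names are placeholders, not BoringSSL API:

    #include <stdint.h>

    typedef struct { uint8_t b[16]; } aes_block;

    static void aes_round(aes_block *st, const aes_block *rk) {
        for (int i = 0; i < 16; i++)
            st->b[i] ^= rk->b[i];  /* AddRoundKey: the XOR inside aese */
        /* sub_bytes(st); shift_rows(st);   -- also folded into aese   */
        /* mix_columns(st);                 -- the separate aesmc step */
    }
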
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
variance_subpixel_altivec.asm 56 vspltisw v18, 3
57 vslw v18, v21, v18 ;# 0x00000040000000400000004000000040
69 ;# v18 rounding
97 vmsummbm v24, v20, v24, v18
98 vmsummbm v25, v20, v25, v18
109 vadduhm v22, v18, v22
111 vadduhm v23, v18, v23
120 vmrghh \P0, v22, v23 ;# v18 v19 = 16-bit result in order
166 compute_sum_sse \V, v16, v18, v19, v20, v21, v2
    [all...]
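
The AltiVec lines build a rounding bias exactly as the comment prints: vspltisw v18, 3 splats the immediate 3 and vslw shifts another splatted constant left by it, leaving 0x40 in each word; vadduhm later adds that bias into the filter sums so the final shift rounds to nearest. The idiom in scalar form (the shift width of 7 is inferred from the 0x40 bias, not shown in the match):

    #include <stdint.h>

    /* Round-to-nearest: add half the divisor before shifting, as the
     * vadduhm of the 0x40 constant does above. */
    static uint8_t round_shift7(uint16_t acc) {
        return (uint8_t)((acc + 0x40) >> 7);
    }
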
  /external/valgrind/none/tests/arm64/
memory.stdout.exp 122 0000000000000000 v18.d[0] (xor, xfer vecreg #2)
123 0000000000000000 v18.d[1] (xor, xfer vecreg #2)
152 0000000000000000 v18.d[0] (xor, xfer vecreg #2)
153 0000000000000000 v18.d[1] (xor, xfer vecreg #2)
182 0000000000000000 v18.d[0] (xor, xfer vecreg #2)
183 0000000000000000 v18.d[1] (xor, xfer vecreg #2)
212 0000000000000000 v18.d[0] (xor, xfer vecreg #2)
213 0000000000000000 v18.d[1] (xor, xfer vecreg #2)
242 0000000000000000 v18.d[0] (xor, xfer vecreg #2)
243 0000000000000000 v18.d[1] (xor, xfer vecreg #2)
    [all...]
  /external/llvm/test/MC/Hexagon/
v60-permute.s 5 #CHECK: 1fd2d5cf { v15.b = vpack(v21.h{{ *}},{{ *}}v18.h):sat }
6 v15.b=vpack(v21.h,v18.h):sat
29 #CHECK: 1e01d256 { v23:22.h = vunpack(v18.b) }
30 v23:22.h=vunpack(v18.b)
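
These Hexagon HVX checks pair each encoding with its syntax: vpack narrows two halfword vectors into one byte vector with saturation (:sat), and vunpack widens a byte vector into a halfword vector pair (v23:22). A scalar model of the saturating narrow, with signedness inferred from the .b/.h suffixes (treat that as an assumption):

    #include <stdint.h>

    /* One lane of vpack(....h, ....h):sat -> .b */
    static int8_t sat_h_to_b(int16_t v) {
        if (v > 127)  return 127;
        if (v < -128) return -128;
        return (int8_t)v;
    }
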

