    Searched full:r11 (Results 326 - 350 of 1806)


  /toolchain/binutils/binutils-2.25/ld/testsuite/ld-powerpc/
tlstocso.d 49 .* (7d 68 02 a6|a6 02 68 7d) mflr r11
50 .* (e8 4b ff f0|f0 ff 4b e8) ld r2,-16\(r11\)
52 .* (7d 62 5a 14|14 5a 62 7d) add r11,r2,r11
53 .* (e9 8b 00 00|00 00 8b e9) ld r12,0\(r11\)
54 .* (e8 4b 00 08|08 00 4b e8) ld r2,8\(r11\)
56 .* (e9 6b 00 10|10 00 6b e9) ld r11,16\(r11\)
  /toolchain/binutils/binutils-2.25/ld/testsuite/ld-x86-64/
tlsgd7.dd 12 [ ]*[a-f0-9]+: 49 bb ([0-9a-f]{2} ){8} movabs \$0x[0-9a-f]+,%r11
16 [ ]*[a-f0-9]+: 4c 01 db add %r11,%rbx
  /external/sonivox/arm-wt-22k/lib_src/
ARM-E_interpolate_loop_gnu.s 54 phaseFracMask .req r11
56 @SaveRegs RLIST {r4-r11,lr}
57 @RestoreRegs RLIST {r4-r11,pc}
62 STMFD sp!,{r4-r11,lr}
126 LDMFD sp!,{r4-r11,lr}
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
Deemph_32_opt.s 41 MOV r11, #0x8000
54 QADD r10, r12, r11
65 QADD r10, r12, r11
79 QADD r10, r12, r11
91 QADD r10, r12, r11
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
Deemph_32_neon.s 41 MOV r11, #0x8000
54 QADD r10, r12, r11
65 QADD r10, r12, r11
79 QADD r10, r12, r11
91 QADD r10, r12, r11
  /external/boringssl/win-x86_64/crypto/aes/
vpaes-x86_64.asm 27 mov r11,16
54 movdqa xmm1,XMMWORD[((-64))+r10*1+r11]
56 movdqa xmm4,XMMWORD[r10*1+r11]
65 add r11,16
68 and r11,0x30
102 movdqa xmm1,XMMWORD[64+r10*1+r11]
121 mov r11,rax
124 shl r11,4
128 xor r11,0x30
131 and r11,0x3
    [all...]
  /external/llvm/test/CodeGen/XCore/
threads.ll 75 ; CHECK: get r11, id
80 ; CHECK: lmul {{r[0-9]}}, r0, r11, [[R2]], [[R0]], [[R1]]
86 ; CHECK: get r11, id
87 ; CHECK: shl [[R0:r[0-9]]], r11, 3
96 ; CHECK: get r11, id
97 ; CHECK: shl [[R0:r[0-9]]], r11, 3
109 ; PHINODE: get r11, id
111 ; PHINODE: get r11, id
129 ; PHINODE: get r11, id
131 ; PHINODE: get r11, i
    [all...]
  /external/boringssl/src/crypto/bn/asm/
rsaz-x86_64.pl 163 movq %rdx, %r11
164 adcq \$0, %r11
167 addq %rax, %r11
212 addq %rax, %r11
215 addq %rbx, %r11
254 movq %r11, %rbx
255 adcq %r11, %r11 #shld \$1, %r10, %r11
260 adcq \$0, %r11
    [all...]
  /external/libhevc/common/arm/
ihevc_intra_pred_filters_luma_mode_19_to_25.s 190 ldrb r11, [r1]
191 strb r11, [r6]
211 ldrb r11, [r1, -r14]
212 @ ldrb r11, [r1, -r7, lsr #8]
213 strb r11, [r6], #-1
257 mov r11,#1
276 vld1.8 {d8},[r10],r11 @(i row)ref_main_idx
286 vld1.8 {d12},[r12],r11 @(ii)ref_main_idx
300 vld1.8 {d16},[r10],r11 @(iii)ref_main_idx
306 vld1.8 {d20},[r12],r11 @(iv)ref_main_id
    [all...]
ihevc_sao_edge_offset_class3.s 99 MLA r11,r10,r1,r0 @pu1_src[(ht - 1) * src_strd + col]
103 VLD1.8 D0,[r11]! @pu1_src[(ht - 1) * src_strd + col]
115 LDR r11,[sp,#0xC0] @Load pu1_src_top_right from sp
118 LDRB r11,[r11] @pu1_src_top_right[0]
119 SUB r12,r9,r11 @pu1_src[wd - 1] - pu1_src_top_right[0]
121 ADD r11,r0,r1 @pu1_src + src_strd
123 LDRB r14,[r11,r10] @pu1_src[wd - 1 - 1 + src_strd]
126 SUB r11,r9,r14 @pu1_src[wd - 1] - pu1_src[wd - 1 - 1 + src_strd]
129 CMP r11,#
    [all...]
ihevc_deblk_luma_vert.s 118 ldrb r11,[r0,#-1] @-1 value
130 add r8,r8,r11
146 ldrb r11,[r14,#-1] @ -1 value
161 add r2,r2,r11
162 subs r11,r2,r10,lsl #1
163 rsbmi r11,r11,#0 @ dp3 value is stored in r8
169 add r4,r11,r12 @ r4 has the d3 value
175 add r14,r8,r11 @ r13 has the value dp
180 add r11, r3, r4 @ r3 has the value
    [all...]
ihevc_sao_edge_offset_class0.s 79 ADD r11,r3,r9 @pu1_src_top[wd]
83 LDRB r12,[r11,#-1] @pu1_src_top[wd - 1]
140 LDRB r11,[r2] @load pu1_src_left since ht - row =0 when it comes first pu1_src_left is incremented later
147 VMOV.8 D15[7],r11 @vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
156 LDRB r11,[r2, #1] @II Iteration load pu1_src_left since ht - row + 1 =1
161 VMOV.8 D29[7],r11 @II Iteration vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
168 LDRB r11,[r12,#16] @pu1_src_cpy[16]
173 VMOV.8 D14[0],r11 @pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
176 LDRB r11,[r12,#16] @II pu1_src_cpy[16]
181 VMOV.8 D28[0],r11 @II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0
    [all...]
ihevc_inter_pred_chroma_copy.s 119 sub r11,r12,#4
145 sub r0,r5,r11 @pu1_src = pu1_src_tmp
146 sub r1,r6,r11 @pu1_dst = pu1_dst_tmp
173 sub r11,r12,#8
197 sub r0,r5,r11 @pu1_src = pu1_src_tmp
198 sub r1,r6,r11 @pu1_dst = pu1_dst_tmp
220 sub r11,r12,#16
244 sub r0,r5,r11 @pu1_src = pu1_src_tmp
245 sub r1,r6,r11 @pu1_dst = pu1_dst_tmp
  /external/libvpx/libvpx/vpx_dsp/arm/
variance_media.asm 35 mov r11, #0 ; initialize sse = 0
63 smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
68 smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
87 smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
92 smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
111 smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1
    [all...]
  /external/boringssl/linux-arm/crypto/bn/
armv4-mont.S 45 stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
61 umull r10,r11,r5,r2 @ ap[0]*bp[0]
70 mov r10,r11
72 mov r11,#0
73 umlal r10,r11,r5,r2 @ ap[j]*bp[0]
82 adds r12,r12,r11
100 mov r11,#0
101 umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
110 adds r10,r11,r7 @ +=tp[j]
112 mov r11,#
    [all...]
  /bionic/libc/arch-arm/generic/bionic/
memcpy.S 59 /* Making room for r5-r11 which will be spilled later */
100 /* Use post-incriment mode for stm to spill r5-r11 to reserved stack
103 stmea sp, {r5-r11}
154 1: ldmia r1!, { r4-r11 }
163 stmia r0!, { r4-r11 }
196 1: ldmfd sp!, {r5-r11}
213 /* Use post-increment mode for stm to spill r5-r11 to reserved stack
216 stmea sp, {r5-r11}
271 ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
289 orr r10, r10, r11, lsl #1
    [all...]
  /bionic/libc/arch-x86_64/bionic/
setjmp.S 78 xorq \reg,%r11
130 movq (%rsp),%r11
139 movq %r11,(_JB_PC * 8)(%rdi)
184 movq (_JB_PC * 8)(%r12),%r11
190 pushq %r11
193 popq %r11
201 movq %r11,0(%rsp)
  /external/libhevc/decoder/arm/
ihevcd_fmt_conv_420sp_to_420p.s 103 SUB r11,r5,r8 @// Dst Y increment
136 ADD r2, r2, r11
151 MOV r11,r8,LSR #1
152 SUB r11,r5,r11 @// Dst U and V increment
191 ADD r3, r3, r11
192 ADD r5, r5, r11
  /external/llvm/test/CodeGen/X86/
patchpoint.ll 9 ; CHECK: movabsq $-559038736, %r11
10 ; CHECK-NEXT: callq *%r11
13 ; CHECK: callq *%r11
31 ; CHECK: movabsq $_foo, %r11
32 ; CHECK-NEXT: callq *%r11
94 ; CHECK: movabsq $6153737369414576827, %r11
95 ; CHECK-NEXT: callq *%r11
preserve_mostcc64.ll 4 ; Every GPR should be saved - except r11
37 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
41 ; Make sure R11 and XMMs are saved before the call
46 ;SSE: movq %r11, [[REG:%[a-z0-9]+]]
61 ;SSE: movq [[REG]], %r11
68 %a6 = call i64 asm sideeffect "", "={r11}"() nounwind
84 call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
  /external/llvm/test/MC/Disassembler/ARM/
move-banked-regs-arm.txt 15 @ CHECK: mrs r11, r12_usr
31 @ CHECK: mrs r11, r12_fiq
61 @ CHECK: mrs r11, sp_und
89 @ CHECK: msr r12_usr, r11
105 @ CHECK: msr r12_fiq, r11
115 @ CHECK: msr SPSR_irq, r11
135 @ CHECK: msr sp_und, r11
move-banked-regs-thumb.txt 14 @ CHECK: mrs r11, r12_usr
30 @ CHECK: mrs r11, r12_fiq
60 @ CHECK: mrs r11, sp_und
91 @ CHECK: msr r12_usr, r11
107 @ CHECK: msr r12_fiq, r11
117 @ CHECK: msr SPSR_irq, r11
137 @ CHECK: msr sp_und, r11
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/crx/
arith_insn.s 23 addcb r11 , r12
47 orb r10 , r11
75 adduw $0x20 , r11
100 cmpw $0x11 , r11
125 subcw r11 , r12
158 addcd r11 , r12
186 ord r10 , r11
  /external/libavc/common/arm/
ih264_inter_pred_luma_copy_a9q.s 89 sub r11, r12, #4
114 sub r0, r5, r11 @pu1_src = pu1_src_tmp
115 sub r1, r6, r11 @pu1_dst = pu1_dst_tmp
125 sub r11, r12, #8
147 sub r0, r5, r11 @pu1_src = pu1_src_tmp
148 sub r1, r6, r11 @pu1_dst = pu1_dst_tmp
155 sub r11, r12, #16
177 sub r0, r5, r11 @pu1_src = pu1_src_tmp
178 sub r1, r6, r11 @pu1_dst = pu1_dst_tmp
  /prebuilts/go/darwin-x86/src/crypto/rc4/
rc4_amd64p32.s 71 LEAL (R10)(DX*4), R11
72 MOVBLZX (R11), BX // ty = d[y]
77 MOVB AX, (R11) // d[y] = tx
112 LEAL (R10)(DX*4), R11; \
113 MOVBLZX (R11), R8; \
114 MOVB r1, (R11); \
173 LEAL (R10)(DX*4), R11
174 MOVBLZX (R11), BX // ty = d[y]
179 MOVB AX, (R11) // d[y] = tx
