    Searched refs: r8 (Results 76 - 100 of 298)


  /prebuilt/ndk/android-ndk-r4/platforms/android-5/arch-x86/usr/include/asm/
ptrace.h 55 unsigned long r8; member in struct:pt_regs
  /prebuilt/ndk/android-ndk-r4/platforms/android-8/arch-x86/usr/include/asm/
ptrace.h 55 unsigned long r8; member in struct:pt_regs
  /prebuilt/ndk/android-ndk-r6/platforms/android-9/arch-x86/usr/include/asm/
ptrace.h 55 unsigned long r8; member in struct:pt_regs
  /external/libffi/src/powerpc/
darwin.S 53 mr r12,r8 /* We only need r12 until the call,
57 mr r8,r1
65 stw r28,-16(r8)
66 stw r29,-12(r8)
67 stw r30,-8(r8)
68 stw r31,-4(r8)
70 stw r9,8(r8)
78 mr r28,r8 /* our AP. */
104 lwz r8,20+(6*4)(r1)
sysv.S 38 mr %r8,%r1
45 stw %r28,-16(%r8)
47 stw %r29,-12(%r8)
49 stw %r30, -8(%r8)
51 stw %r31, -4(%r8)
53 stw %r9, 4(%r8)
60 mr %r28,%r8 /* our AP. */
80 lwz %r8,-16-(3*4)(%r28)
  /external/libvpx/vp8/common/arm/armv6/
sixtappredict8x4_v6.asm 52 ldrb r8, [r0, #-3]
59 pkhbt r7, r7, r8, lsl #16 ; r8 | r7
61 pkhbt r8, r8, r9, lsl #16 ; r9 | r8
70 smlad r11, r8, r4, r11 ; vp8_filter[2], vp8_filter[3]
93 movne r6, r8
95 movne r8, r10
146 ldr r8, [sp, #8
    [all...]
iwalsh_v6.asm 31 ldr r8, [r0], #4 ; [13 | 12]
34 qadd16 r10, r2, r8 ; a1 [1+13 | 0+12]
37 qsub16 lr, r2, r8 ; d1 [1-13 | 0-12]
42 qsub16 r8, lr, r12 ; d1 - c1 [13 | 12]
93 qsubaddx r4, r8, r9 ; [c1|a1] [13-14 | 12+15]
94 qaddsubx r5, r8, r9 ; [b1|d1] [13+14 | 12-15]
98 qaddsubx r8, r4, r5 ; [b2|c2] [c1+d1 | a1-b1]
103 qadd16 r8, r8, r10 ; [b2+3|c2+3]
113 asr r4, r8, #3 ; [13 | x
    [all...]
  /external/zlib/contrib/masmx64/
inffasx64.asm 29 ; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.
70 mov r8, r12 ; /* r8 = lmask */
89 mov r8, r12 ; /* r8 = lmask */
100 and r8, rdx ; /* r8 &= hold */
101 mov eax, [rbp+r8*4] ; /* eax = lcode[hold & lmask] */
110 mov r8, r12 ; /* r8 = lmask */
    [all...]
  /external/libvpx/vpx_scale/arm/armv4/
gen_scalers_armv4.asm 79 and r8, mask, r3, lsr #24 ; d = src[3]
83 orr r7, r8, r7, lsl #16 ; c | d
92 orr r9, r9, r8, lsl #16 ; d | e
115 and r8, mask, r3, lsl #24
119 orr r7, r8, r7, lsl #16 ; c | d
176 ldrb r8, [r3], r1 ; d = des[dest_pitch*3]
180 orr r7, r8, r7, lsl #16 ; c | d
187 orr r9, r9, r8, lsl #16 ; d | e
301 stmdb sp!, {r4 - r8, lr}
314 mla r8, lr, r7, r5 ; c * 8
    [all...]
  /external/zlib/contrib/gcc_gvmat64/
gvmat64.S 68 ; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12
176 ; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.
193 ; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match)
195 ; this clear high 32 bits of r8, which can be garbage in both r8 and rdx
298 cmp bx,word ptr [rsi + r8 - 1]
306 movzx r8d, word ptr [rdi + r8*2]
318 cmp bx,word ptr [rsi + r8 - 1]
326 movzx r8d, word ptr [rdi + r8*2]
337 cmp bx,word ptr [rsi + r8 - 1]
    [all...]
  /external/v8/test/cctest/
test-disasm-arm.cc 120 COMPARE(eor(r4, r5, Operand(r8, LSL, 2), LeaveCC, ne),
121 "10254108 eorne r4, r5, r8, lsl #2");
143 COMPARE(add(r7, r8, Operand(ip, ASR, 1)),
144 "e08870cc add r7, r8, ip, asr #1");
145 COMPARE(add(r7, r8, Operand(ip, ASR, 0)),
146 "e088704c add r7, r8, ip, asr #32");
147 COMPARE(add(r7, r8, Operand(ip), SetCC),
148 "e098700c adds r7, r8, ip");
149 COMPARE(add(r7, r8, Operand(ip, ASR, 31), SetCC, vs),
150 "60987fcc addvss r7, r8, ip, asr #31")
    [all...]
  /bootable/bootloader/legacy/usbloader/
init.S 69 mov r8, r1
90 mov r1, r8
  /external/clang/test/CXX/except/except.spec/
p3.cpp 36 extern void (*r8)() throw(int);
37 extern void (*r8)() throw(const int);
  /external/libvpx/vp8/decoder/arm/armv6/
dequantize_v6.asm 32 smultt r8, r3, r4
43 strh r8, [r2], #2
44 smultt r8, r3, r4
58 strh r8, [r2], #2
  /external/llvm/test/MC/X86/
x86_64-encoding.s 85 // CHECK: movd %r8, %mm1
87 movd %r8, %mm1
101 // CHECK: movd %mm1, %r8
103 movd %mm1, %r8
  /external/openssl/crypto/
ia64cpuid.S 14 { .mib; mov r8=ar.itc
27 add r8=r2,r33
30 cmpxchg4.acq r2=[r32],r8,ar.ccv
36 sxt4 r8=r8
60 mov r8=pr
124 mov pr=r8,0x1ffff }
125 { .mib; mov r8=sp
  /frameworks/base/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
cor_h_vec_opt.s 55 LDRSH r8, [r9], #2
58 MLA r5, r12, r8, r5
72 ADD r8, r7, #32
82 LDRSH r11, [r8] @*p3++
101 LDRSH r8, [r9], #2
104 MLA r5, r12, r8, r5
119 ADD r8, r7, #32
129 LDRSH r11, [r8] @*p3++
Deemph_32_opt.s 45 MOV r8, r5, ASR #1 @fac = mu >> 1
49 MUL r9, r5, r8
62 MUL r9, r14, r8
74 MUL r9, r14, r8
86 MUL r9, r14, r8
  /frameworks/base/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
cor_h_vec_neon.s 56 LDRSH r8, [r9], #2
59 MLA r5, r12, r8, r5
73 ADD r8, r7, #32
83 LDRSH r11, [r8] @*p3++
102 LDRSH r8, [r9], #2
105 MLA r5, r12, r8, r5
120 ADD r8, r7, #32
130 LDRSH r11, [r8] @*p3++
Deemph_32_neon.s 45 MOV r8, r5, ASR #1 @fac = mu >> 1
49 MUL r9, r5, r8
62 MUL r9, r14, r8
74 MUL r9, r14, r8
86 MUL r9, r14, r8
  /frameworks/base/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_UnpackBlock4x4_s.S 16 PUSH {r4-r8,lr}
38 POP {r4-r8,pc}
  /external/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copyframeyonly_neon.asm 41 mov r8, r2
48 vld1.8 {q0, q1}, [r8]!
50 vld1.8 {q2, q3}, [r8]!
52 vld1.8 {q4, q5}, [r8]!
54 vld1.8 {q6, q7}, [r8]!
143 mul r8, r3, lr
150 sub r5, r1, r8 ;destptr1
183 sub r5, r1, r8
226 sub r5, r1, r8
273 mul r8, r3, l
    [all...]
  /external/libvpx/vp8/encoder/arm/armv6/
vp8_variance_halfpixvar16x16_hv_armv6.asm 28 mov r8, #0 ; initialize sum = 0
65 adds r8, r8, r4 ; add positive differences to sum
66 subs r8, r8, r5 ; substract negative differences from sum
107 add r8, r8, r4 ; add positive differences to sum
108 sub r8, r8, r5 ; substract negative differences from sum
149 add r8, r8, r4 ; add positive differences to su
    [all...]
  /external/openssl/crypto/bn/asm/
s390x.S 28 lghi %r8,0 // carry = 0
37 alcgr %r7,%r8 // +=carry
43 mlgr %r8,%r5
45 alcgr %r8,zero
51 alcgr %r7,%r8
57 mlgr %r8,%r5
59 alcgr %r8,zero
70 alcgr %r8,zero // collect carry bit
71 lgr %r2,%r8
78 alcgr %r7,%r8 // +=carr
    [all...]
  /frameworks/base/media/libstagefright/codecs/mp3dec/src/asm/
pvmp3_dct_9_arm.s 73 add r8,r9,r2
75 add r10,r7,r8
76 rsb r7,r8,r7,asr #1
85 mov r8,r7
89 smlal r1,r8,r11,r9
114 smlal r12,r8,r11,r1
123 smlal r12,r8,r6,r1
148 str r8,[r0, #0x10]
155 smlal r8,r6,r7,r12

