/dalvik/vm/mterp/arm-vfp/
fbinop2addr.S
    10  mov     r9, rINST, lsr #8           @ r9<- A+
    12  and     r9, r9, #15                 @ r9<- A
    14  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    16  flds    s0, [r9]                    @ s0<- vA
    20  fsts    s2, [r9]                    @ vAA<- s [all...]
fbinopWide2addr.S
    11  mov     r9, rINST, lsr #8           @ r9<- A+
    13  and     r9, r9, #15                 @ r9<- A
    15  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    17  fldd    d0, [r9]                    @ d0<- vA
    21  fstd    d2, [r9]                    @ vAA<- d [all...]
funop.S
     9  mov     r9, rINST, lsr #8           @ r9<- A+
    13  and     r9, r9, #15                 @ r9<- A
    16  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    17  fsts    s1, [r9]                    @ vA<- s1
funopNarrower.S
     9  mov     r9, rINST, lsr #8           @ r9<- A+
    13  and     r9, r9, #15                 @ r9<- A
    16  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    17  fsts    s0, [r9]                    @ vA<- s0
funopWider.S
     9  mov     r9, rINST, lsr #8           @ r9<- A+
    13  and     r9, r9, #15                 @ r9<- A
    16  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    17  fstd    d0, [r9]                    @ vA<- d0
fbinop.S
    10  mov     r9, rINST, lsr #8           @ r9<- AA
    21  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    22  fsts    s2, [r9]                    @ vAA<- s2
fbinopWide.S
    10  mov     r9, rINST, lsr #8           @ r9<- AA
    21  VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    22  fstd    d2, [r9]                    @ vAA<- d2
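The pattern opening each of these VFP handlers decodes the 4-bit destination register A from the instruction word in rINST, then turns the index into a frame-slot address. A minimal C sketch of that decode, assuming a 16-bit code unit and a flat vreg array (the names inst, vregs, and vreg_addr are illustrative, not from the Dalvik sources):

    #include <stdint.h>

    /* mov r9, rINST, lsr #8  ->  inst >> 8      (r9<- A+)
       and r9, r9, #15        ->  ... & 0xF      (r9<- A)
       VREG_INDEX_TO_ADDR     ->  &vregs[a]      (r9<- &vA) */
    static uint32_t *vreg_addr(uint32_t *vregs, uint16_t inst) {
        uint32_t a = (inst >> 8) & 0xF;
        return &vregs[a];
    }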
/external/openssl/crypto/sha/asm/
sha1-armv4-large.s
    19  ldrb    r9,[r1],#4
    24  orr     r9,r10,r9,lsl#8
    26  orr     r9,r11,r9,lsl#8
    28  orr     r9,r12,r9,lsl#8
    29  add     r7,r7,r9                    @ E+=X[i]
    30  str     r9,[r14,#-4]!
    34  ldrb    r9,[r1],# [all...]
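The ldrb/orr,lsl#8 run above assembles each input word byte by byte, which both tolerates unaligned input and yields the big-endian word order SHA-1 expects. A minimal C equivalent (the function name is illustrative):

    #include <stdint.h>

    /* Four byte loads merged high-to-low, as in the excerpt. */
    static uint32_t load_be32(const uint8_t *p) {
        uint32_t w = p[0];
        w = (w << 8) | p[1];
        w = (w << 8) | p[2];
        w = (w << 8) | p[3];
        return w;
    }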
/dalvik/vm/mterp/armv5te/
unopWide.S
    10  mov     r9, rINST, lsr #8           @ r9<- A+
    12  and     r9, r9, #15
    14  add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    20  stmia   r9, {r0-r1}                 @ vAA<- r0/r1
unopWider.S
    10  mov     r9, rINST, lsr #8           @ r9<- A+
    12  and     r9, r9, #15
    14  add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    19  stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
OP_GOTO.S
    11  movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    12  mov     r9, r9, lsl #1              @ r9<- byte offset
    16  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    22  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
OP_MUL_LONG_2ADDR.S
    11  mov     r9, rINST, lsr #8           @ r9<- A+
    13  and     r9, r9, #15
    15  add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
    19  umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    25  stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
binopWide2addr.S
    17  mov     r9, rINST, lsr #8           @ r9<- A+
    19  and     r9, r9, #15
    21  add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    23  ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    33  stmia   r9, {$result0,$result1}     @ vAA/vAA+1<- $result0/$result1
binopLit16.S
    17  mov     r9, rINST, lsr #8           @ r9<- A+
    19  and     r9, r9, #15
    28  SET_VREG($result, r9)               @ vAA<- $result
unop.S
    12  mov     r9, rINST, lsr #8           @ r9<- A+
    14  and     r9, r9, #15
    19  SET_VREG(r0, r9)                    @ vAA<- r0
OP_CONST_WIDE.S
     8  mov     r9, rINST, lsr #8           @ r9<- AA
    11  add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    13  stmia   r9, {r0-r1}                 @ vAA<- r0/r1
binop2addr.S
    17  mov     r9, rINST, lsr #8           @ r9<- A+
    19  and     r9, r9, #15
    21  GET_VREG(r0, r9)                    @ r0<- vA
    31  SET_VREG($result, r9)               @ vAA<- $result
bincmp.S
    16  mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    19  FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    20  movs    r9, r9, asl #1              @ convert to bytes, check sign
    25  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    28  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
zcmp.S
    13  mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    16  FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    17  movs    r9, r9, asl #1              @ convert to bytes, check sign
    22  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    28  FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
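OP_GOTO, bincmp, and zcmp share the same offset arithmetic: branch offsets are signed and counted in 16-bit code units, so each handler doubles them into a byte offset before FETCH_ADVANCE_INST_RB, and the not-taken compare path advances a fixed 4 bytes (the mov r9, #4). A C sketch under those assumptions, with illustrative function names:

    #include <stdint.h>

    /* goto +AA: asr #24 sign-extends the AA byte, lsl #1 converts
       code units to bytes. */
    static int32_t goto_aa_bytes(uint16_t inst) {
        int8_t aa = (int8_t)(inst >> 8);    /* ssssssAA */
        return (int32_t)aa << 1;
    }

    /* if-eqz and friends: taken -> doubled FETCH_S offset,
       not taken -> 2 code units = 4 bytes. */
    static int32_t branch_bytes(int16_t offset, int taken) {
        return taken ? (int32_t)offset << 1 : 4;
    }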
/dalvik/vm/mterp/armv6t2/
unopWider.S
    11  ubfx    r9, rINST, #8, #4           @ r9<- A
    13  add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    18  stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
unopWide.S
    11  ubfx    r9, rINST, #8, #4           @ r9<- A
    13  add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    19  stmia   r9, {r0-r1}                 @ vAA<- r0/r1
OP_MUL_LONG_2ADDR.S
    12  ubfx    r9, rINST, #8, #4           @ r9<- A
    14  add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
    18  umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    24  stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
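The armv6t2 variants collapse the mov/and pair from the armv5te handlers into a single bit-field extract. What ubfx r9, rINST, #8, #4 computes, as a one-line C sketch:

    #include <stdint.h>

    /* 4-bit field starting at bit 8, zero-extended. */
    static uint32_t field_a(uint32_t inst) {
        return (inst >> 8) & 0xF;
    }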
/dalvik/vm/compiler/template/armv5te/
TEMPLATE_MUL_LONG.S
    23  umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    26  mov     r0,r9
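TEMPLATE_MUL_LONG and the OP_MUL_LONG handlers build a 64-bit product from 32-bit halves: umull produces the full product of the low halves (the ZxX line), and the two cross products are folded into the high word. A C sketch following the X/Y/Z/W naming of the source comments (the function name is illustrative):

    #include <stdint.h>

    /* operands: a = W:X (r1:r0), b = Y:Z (r3:r2) */
    static uint64_t mul_long(uint64_t a, uint64_t b) {
        uint32_t x = (uint32_t)a, w = (uint32_t)(a >> 32);
        uint32_t z = (uint32_t)b, y = (uint32_t)(b >> 32);
        uint64_t zx = (uint64_t)z * x;        /* umull r9,r10,r2,r0 */
        uint32_t hi = (uint32_t)(zx >> 32)
                    + z * w + y * x;          /* mla + add */
        return ((uint64_t)hi << 32) | (uint32_t)zx;
    }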
/external/openssl/crypto/bn/asm/
ppc.pl
   232  #.set r9,9
   275  # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
   279  # r9,r10, r11 are the equivalents of c1,c2, c3.
   288  $UMULL  r9,r5,r5
   294  $ST     r9,`0*$BNSZ`(r3)            # r[0]=c1;
   302  addze   r9,r0                       # catch carry if any.
   303  # r9= r0(=0) and carry
   307  addze   r9,r9
   314  adde    r9,r8,r [all...]
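The c1/c2/c3 accumulators named in the ppc.pl comments implement comba-style column accumulation: each 32x32 partial product is added into a running three-limb sum, with carries rippling into c3 (the addze lines). A minimal C sketch of one such step, assuming 32-bit limbs; the helper name mirrors the mul_add_c macro in OpenSSL's C bignum code, and the pointer-based signature is illustrative:

    #include <stdint.h>

    static void mul_add_c(uint32_t a, uint32_t b,
                          uint32_t *c1, uint32_t *c2, uint32_t *c3) {
        uint64_t t  = (uint64_t)a * b;                 /* $UMULL/$UMULH */
        uint64_t lo = (uint64_t)*c1 + (uint32_t)t;
        *c1 = (uint32_t)lo;                            /* addc */
        uint64_t hi = (uint64_t)*c2 + (uint32_t)(t >> 32)
                    + (uint32_t)(lo >> 32);
        *c2 = (uint32_t)hi;                            /* adde */
        *c3 += (uint32_t)(hi >> 32);                   /* addze */
    }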
/external/libvpx/vp8/common/arm/armv6/
recon_v6.asm
    41  stmdb   sp!, {r4 - r9, lr}
    49  pkhtb   r9, r7, r6, asr #16         ; 3 | 1
    52  uxtab16 r9, r9, r4, ror #8          ; 3 | 1 + 0 | 3 | 2 | 1
    55  usat16  r9, #8, r9
    57  orr     r8, r8, r9, lsl #8
    69  pkhtb   r9, r7, r6, asr #16         ; 3 | 1
    72  uxtab16 r9, r9, r4, ror #8          ; 3 | 1 + 0 | 3 | 2 | [all...]
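The pkhtb/uxtab16/usat16 sequence is VP8's reconstruction step done two pixels per register: 16-bit residuals are added to zero-extended 8-bit predictor bytes, then saturated back to the 0..255 pixel range (the usat16 #8). A scalar C sketch of the per-pixel operation (names are illustrative):

    #include <stdint.h>

    static uint8_t recon_pixel(uint8_t pred, int16_t diff) {
        int32_t v = (int32_t)pred + diff;   /* uxtab16 add */
        if (v < 0)   v = 0;                 /* usat16 lower clamp */
        if (v > 255) v = 255;               /* usat16 upper clamp */
        return (uint8_t)v;
    }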