/dalvik/vm/compiler/template/armv5te/

TEMPLATE_MUL_LONG.S
    23    umull   r9, r10, r2, r0     @ r9/r10 <- ZxX
    25    add     r10, r2, r10        @ r10<- r10 + low(ZxW + (YxX))
    27    mov     r1,r10
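
The MUL_LONG templates build a 64-bit product from 32-bit halves: one UMULL gives the low-by-low partial product, and the two cross products are added into the high word (the high-by-high term cannot reach the low 64 bits). A minimal C sketch of the same decomposition, with illustrative names rather than Dalvik's:

    #include <stdint.h>

    /* Low 64 bits of (x_hi:x_lo) * (y_hi:y_lo). */
    static uint64_t mul64(uint32_t x_lo, uint32_t x_hi,
                          uint32_t y_lo, uint32_t y_hi)
    {
        uint64_t lo = (uint64_t)x_lo * y_lo;             /* UMULL: low x low        */
        uint32_t hi = (uint32_t)(lo >> 32)
                    + x_hi * y_lo                        /* low word of one cross product */
                    + x_lo * y_hi;                       /* low word of the other         */
        return ((uint64_t)hi << 32) | (uint32_t)lo;      /* stored back as a register pair */
    }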

TEMPLATE_STRING_COMPARETO.S
    26    ldr     r10, [r1, #STRING_FIELDOFF_COUNT]
    34    * count:     r7/r10
    37    * r10 <- minCount
    39    subs    r11, r7, r10
    40    movls   r10, r7
    56    * r10: iteration count for comparison
    62    subs    r10, #2
    77    cmp     r10, #28
    79    subs    r10, #3
    93    subs    r10, # [all...]
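
Here r10 first holds the shorter string's length (minCount) and then the comparison loop counter. The logic behind the tuned, unrolled assembly is the ordinary String.compareTo contract: compare characters up to the shorter length, otherwise return the length difference. A rough C sketch under that assumption:

    #include <stdint.h>

    /* Sketch of the compareTo logic this routine implements. */
    static int string_compare_to(const uint16_t *s1, int count1,
                                 const uint16_t *s2, int count2)
    {
        int min_count = (count1 < count2) ? count1 : count2;   /* minCount */
        for (int i = 0; i < min_count; i++) {
            int diff = s1[i] - s2[i];
            if (diff != 0)
                return diff;            /* first differing character decides */
        }
        return count1 - count2;         /* common prefix: shorter string sorts first */
    }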

/external/llvm/test/MC/X86/

x86_64-bmi-encoding.s
     7    // CHECK: blsmskq %r11, %r10
     9    blsmskq %r11, %r10
    15    // CHECK: blsmskq (%rax), %r10
    17    blsmskq (%rax), %r10
    23    // CHECK: blsiq %r11, %r10
    25    blsiq %r11, %r10
    31    // CHECK: blsiq (%rax), %r10
    33    blsiq (%rax), %r10
    39    // CHECK: blsrq %r11, %r10
    41    blsrq %r11, %r10
    [all...]
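
These tests only verify instruction encodings, but the BMI1 operations behind them are simple bit tricks: blsmsk, blsi and blsr on a value x are equivalent to x ^ (x - 1), x & -x and x & (x - 1) respectively. A small C illustration (function names are my own):

    #include <stdint.h>

    static uint64_t blsmsk(uint64_t x) { return x ^ (x - 1); }   /* mask up to and including lowest set bit */
    static uint64_t blsi(uint64_t x)   { return x & (0 - x); }   /* isolate lowest set bit */
    static uint64_t blsr(uint64_t x)   { return x & (x - 1); }   /* clear lowest set bit */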

/frameworks/base/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/

armVCM4P10_InterpolateLuma_Align_unsafe_s.S
    26    LDM r0,{r7,r10,r11}
    29    STM r8!,{r7,r10,r11}
    33    LDM r0,{r7,r10,r11}
    37    ORR r7,r7,r10,LSL #24
    38    LSR r10,r10,#8
    39    ORR r10,r10,r11,LSL #24
    41    STM r8!,{r7,r10,r11}
    45    LDM r0,{r7,r10,r11 [all...]
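
The LSR #8 / ORR ..., LSL #24 pairs are the classic way to read an unaligned source using only aligned word loads: each output word is assembled from the tail of one loaded word and the head of the next. A hedged C sketch of that realignment for a one-byte misalignment on a little-endian target (names are illustrative; the assembly has separate paths per alignment):

    #include <stdint.h>

    /* Combine two aligned 32-bit loads into one value that starts 1 byte in. */
    static uint32_t realign_by_1(uint32_t w0, uint32_t w1)
    {
        return (w0 >> 8) | (w1 << 24);    /* LSR #8, then ORR ..., LSL #24 */
    }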

armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
    22    LDR r10,[r0,#8]
    26    UQSUB16 r10,r10,r6
    30    USAT16 r10,#13,r10
    34    AND r10,r12,r10,LSR #5
    37    ORR r11,r10,r11,LSL #8
    38    ORR r10,r4,r5,LSL #8
    40    STRD r10,r11,[r7],# [all...]

omxVCM4P10_PredictIntra_16x16_s.S
    47    ADD r10,r5,r5
    48    VST1.8 {d0,d1},[r3],r10
    49    VST1.8 {d0,d1},[r8],r10
    50    VST1.8 {d0,d1},[r3],r10
    51    VST1.8 {d0,d1},[r8],r10
    52    VST1.8 {d0,d1},[r3],r10
    53    VST1.8 {d0,d1},[r8],r10
    54    VST1.8 {d0,d1},[r3],r10
    55    VST1.8 {d0,d1},[r8],r10
    56    VST1.8 {d0,d1},[r3],r10
    [all...]

omxVCM4P10_PredictIntraChroma_8x8_s.S
    28    PUSH {r4-r10,lr}
    40    ADD r10,r4,r4
    41    VLD1.8 {d1[0]},[r0],r10
    42    VLD1.8 {d1[1]},[r9],r10
    43    VLD1.8 {d1[2]},[r0],r10
    44    VLD1.8 {d1[3]},[r9],r10
    45    VLD1.8 {d1[4]},[r0],r10
    46    VLD1.8 {d1[5]},[r9],r10
    47    VLD1.8 {d1[6]},[r0],r10
    70    ADD r10,r5,r [all...]

armVCM4P10_Average_4x_Align_unsafe_s.S
    19    LDR r10,[r0],r1
    24    UHSUB8 r5,r10,r12
    30    LDR r10,[r0],r1
    35    UHSUB8 r5,r10,r12
    51    LDR r10,[r0],r1
    58    LSR r10,r10,#16
    59    ORR r10,r10,r4,LSL #16
    62    UHSUB8 r5,r10,r1 [all...]

/frameworks/base/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/

Deemph_32_opt.s
    44    MOV r10, r6, LSL #16         @L_tmp = x_hi[0]<<16
    47    ADD r12, r10, r7, LSL #4     @L_tmp += x_lo[0] << 4
    48    MOV r10, r12, LSL #3         @L_tmp <<= 3
    51    QDADD r10, r10, r9
    53    MOV r12, r10, LSL #1         @L_tmp = L_mac(L_tmp, *mem, fac)
    54    QADD r10, r12, r11
    55    MOV r14, r10, ASR #16        @y[0] = round(L_tmp)
    58    MOV r10, r6, LSL #16
    59    ADD r12, r10, r7, LSL # [all...]
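
The inline comments spell out the fixed-point recipe: rebuild a 32-bit sample from its high and low halves, scale it, then run a first-order de-emphasis filter y[n] = x[n] + fac * y[n-1] with saturating multiply-accumulates. A plain C sketch of one iteration that follows those comments; this is an approximation, since the saturating QDADD/QADD behaviour is replaced by ordinary arithmetic:

    #include <stdint.h>

    /* One de-emphasis step; *mem holds the previous output sample. */
    static int16_t deemph_step(int16_t x_hi, int16_t x_lo,
                               int16_t fac, int16_t *mem)
    {
        int32_t L_tmp = (int32_t)x_hi << 16;            /* L_tmp = x_hi[0]<<16   */
        L_tmp += (int32_t)x_lo << 4;                    /* L_tmp += x_lo[0] << 4 */
        L_tmp <<= 3;                                    /* L_tmp <<= 3           */
        L_tmp += 2 * ((int32_t)fac * *mem);             /* L_mac(L_tmp, *mem, fac), no saturation */
        int16_t y = (int16_t)((L_tmp + 0x8000) >> 16);  /* y[0] = round(L_tmp)   */
        *mem = y;                                       /* filter memory for the next sample */
        return y;
    }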

pred_lt4_1_opt.s
    66    SMULBB r10, r4, r3           @x[0] * h[0]
    71    SMLABT r10, r6, r3, r10      @x[1] * h[1]
    77    SMLABB r10, r9, r3, r10      @x[2] * h[2]
    82    SMLABT r10, r4, r3, r10      @x[3] * h[3]
    88    SMLABB r10, r6, r3, r10      @x[4] * h[4]
    93    SMLABT r10, r9, r3, r10      @x[5] * h[5 [all...]

/frameworks/base/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/

Deemph_32_neon.s
    44    MOV r10, r6, LSL #16         @L_tmp = x_hi[0]<<16
    47    ADD r12, r10, r7, LSL #4     @L_tmp += x_lo[0] << 4
    48    MOV r10, r12, LSL #3         @L_tmp <<= 3
    51    QDADD r10, r10, r9
    53    MOV r12, r10, LSL #1         @L_tmp = L_mac(L_tmp, *mem, fac)
    54    QADD r10, r12, r11
    55    MOV r14, r10, ASR #16        @y[0] = round(L_tmp)
    58    MOV r10, r6, LSL #16
    59    ADD r12, r10, r7, LSL # [all...]

/device/samsung/crespo/sec_mm/sec_omx/sec_codecs/video/mfc_c110/csc/

csc_nv12t_yuv420_uv_neon.s
    64    @r10       temp1
    83    and r10, r5, #0x1
    84    cmp r10, #0x1
    88    add r10, r3, #127            @ temp1 = ((linear_x_size+127)>>7)<<7@
    89    bic r10, #0x7F
    90    mov r10, r10, asr #6         @ tiled_offset = tiled_offset*(temp1>>6)@
    91    mul r7, r7, r10
   105    add r10, r3, #127            @ temp1 = ((linear_x_size+127)>>7)<<7@
   106    bic r10, #0x7 [all...]
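
The commented lines give the address math directly: round linear_x_size up to a multiple of 128 (add 127, clear the low 7 bits), then scale the tile offset by that rounded width in units of 64. The same fragment expressed in C, covering only what is quoted above, not the whole tiled-to-linear conversion:

    /* Fragment shown above; variable names follow the comments. */
    static int scale_tiled_offset(int tiled_offset, int linear_x_size)
    {
        int temp1 = (linear_x_size + 127) & ~127;   /* ((linear_x_size+127)>>7)<<7 */
        return tiled_offset * (temp1 >> 6);         /* tiled_offset * (temp1>>6)   */
    }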

/system/core/libpixelflinger/

rotate90CW_4x4_16v6.S
    36    stmfd sp!, {r4,r5, r6,r7, r8,r9, r10,r11, lr}
    45    pkhbt r10, r8, r6, lsl #16
    47    strd r10, r11, [r0], r12
    49    pkhtb r10, r6, r8, asr #16
    52    strd r10, r11, [r0], r12
    53    pkhbt r10, r9, r7, lsl #16
    56    strd r10, r11, [r0], r12
    58    pkhtb r10, r7, r9, asr #16
    60    strd r10, r11, [r0]
    62    ldmfd sp!, {r4,r5, r6,r7, r8,r9, r10,r11, pc [all...]
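
pkhbt/pkhtb pack two 16-bit pixels per register so each rotated row can be written out with strd pairs. Functionally, rotating a 4x4 tile of 16-bit pixels 90 degrees clockwise just moves src[y][x] to dst[x][3-y]; a straightforward, unoptimised C reference for comparison (strides are in pixels):

    #include <stdint.h>

    static void rotate90cw_4x4_16(uint16_t *dst, int dst_stride,
                                  const uint16_t *src, int src_stride)
    {
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                dst[x * dst_stride + (3 - y)] = src[y * src_stride + x];
    }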

/dalvik/vm/mterp/armv5te/

OP_MUL_LONG_2ADDR.S
    19    umull   r9, r10, r2, r0     @ r9/r10 <- ZxX
    23    add     r10, r2, r10        @ r10<- r10 + low(ZxW + (YxX))
    25    stmia   r0, {r9-r10}        @ vAA/vAA+1<- r9/r10

OP_INVOKE_SUPER_QUICK.S
    11    FETCH(r10, 2)               @ r10<- GFED or CCCC
    14    and     r10, r10, #15       @ r10<- D (or stays CCCC)
    20    GET_VREG(r9, r10)           @ r9<- "this"

OP_INVOKE_SUPER.S
    11    FETCH(r10, 2)               @ r10<- GFED or CCCC
    14    and     r10, r10, #15       @ r10<- D (or stays CCCC)
    18    GET_VREG(r9, r10)           @ r9<- "this" ptr
    21    ldr     r10, [rSELF, #offThread_method]   @ r10<- current method
    24    ldr     r10, [r10, #offMethod_clazz]      @ r10<- method->claz [all...]
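
In both invoke handlers, FETCH pulls the second 16-bit code unit, which holds either the packed GFED argument registers or CCCC for the range form; masking with 15 keeps D, the register carrying "this". The decoding step amounts to the following sketch in C (the helper name is mine, the field names are quoted from the comments above):

    #include <stdint.h>

    /* Extract the "this" register index from the second code unit. */
    static unsigned this_reg(uint16_t second_unit, int is_range)
    {
        return is_range ? second_unit           /* CCCC: first register of the range */
                        : (second_unit & 0xF);  /* D: low nibble of GFED              */
    }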

OP_MUL_LONG.S
    29    umull   r9, r10, r2, r0     @ r9/r10 <- ZxX
    32    add     r10, r2, r10        @ r10<- r10 + low(ZxW + (YxX))
    40    stmia   r0, {r9-r10}        @ vAA/vAA+1<- r9/r10

/dalvik/vm/mterp/armv6t2/

OP_MUL_LONG_2ADDR.S
    18    umull   r9, r10, r2, r0     @ r9/r10 <- ZxX
    22    add     r10, r2, r10        @ r10<- r10 + low(ZxW + (YxX))
    24    stmia   r0, {r9-r10}        @ vAA/vAA+1<- r9/r10

/external/valgrind/main/coregrind/m_mach/

mach_traps-amd64-darwin.S
    40    movq    %rcx, %r10
    51    // movq %rcx, %r10
    61    movq    %rcx, %r10
    71    movq    %rcx, %r10
    81    movq    %rcx, %r10
    91    movq    %rcx, %r10
   101    movq    %rcx, %r10
   111    movq    %rcx, %r10
   121    movq    %rcx, %r10
   131    movq    %rcx, %r10
    [all...]
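
Every trap stub here copies %rcx into %r10 before issuing `syscall`. The reason is the x86-64 ABI split: a C caller passes the fourth argument in %rcx, but `syscall` itself clobbers %rcx (it stores the return RIP there), so the kernel expects that argument in %r10 instead. A hedged C/inline-asm illustration of the convention, written Linux-flavoured; the Darwin stubs perform the same register shuffle but use Mach trap numbers:

    /* Raw 4-argument system call: note arg4 travels in r10, not rcx. */
    static long raw_syscall4(long nr, long a1, long a2, long a3, long a4)
    {
        register long r10 __asm__("r10") = a4;
        long ret;
        __asm__ volatile ("syscall"
                          : "=a"(ret)
                          : "a"(nr), "D"(a1), "S"(a2), "d"(a3), "r"(r10)
                          : "rcx", "r11", "memory");
        return ret;
    }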

/external/openssl/crypto/sha/asm/

sha1-armv4-large.s
    20    ldrb    r10,[r1,#-1]
    25    orr     r9,r10,r9,lsl#24
    26    eor     r10,r5,r6           @ F_xx_xx
    29    and     r10,r4,r10,ror#2
    31    eor     r10,r10,r6,ror#2    @ F_00_19(B,C,D)
    33    add     r7,r7,r10           @ E+=F_00_19(B,C,D)
    35    ldrb    r10,[r1,#-1]
    40    orr     r9,r10,r9,lsl#2 [all...]
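
The eor/and/eor sequence computes the SHA-1 round function for rounds 0-19 without a separate NOT: F_00_19(B,C,D) = (B & C) | (~B & D) is rewritten as D ^ (B & (C ^ D)). In C:

    #include <stdint.h>

    /* SHA-1 "choice" function for rounds 0-19, in the rewritten form. */
    static uint32_t f_00_19(uint32_t b, uint32_t c, uint32_t d)
    {
        return d ^ (b & (c ^ d));        /* same result as (b & c) | (~b & d) */
    }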

sha512-armv4.s
    60    ldr     r10, [r0,#48+0]
    65    str     r10, [sp,#48+4]
    73    ldr     r10, [r0,#16+0]
    79    str     r10, [sp,#16+4]
    90    ldrb    r10, [r1,#5]
    96    orr     r3,r3,r10,lsl#16
    97    ldrb    r10, [r1],#8
   101    orr     r4,r4,r10,lsl#24
   110    mov     r10,r8,lsr#14
   112    eor     r10,r10,r7,lsl#1 [all...]
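
On 32-bit ARM, sha512 has to synthesise 64-bit rotates from 32-bit shifts of the two register halves; the mov r10,r8,lsr#14 above is the start of one such composed rotation. The underlying operation is the ordinary 64-bit rotate used by the Sigma functions, for example Sigma1(e) = ROTR64(e,14) ^ ROTR64(e,18) ^ ROTR64(e,41); in C:

    #include <stdint.h>

    static uint64_t rotr64(uint64_t x, unsigned n)   /* n in 1..63 */
    {
        return (x >> n) | (x << (64 - n));
    }

    static uint64_t big_sigma1(uint64_t e)
    {
        return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
    }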

/external/openssl/crypto/bn/asm/

ppc.pl
   233    #.set r10,10
   275    # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
   279    # r9,r10, r11 are the equivalents of c1,c2, c3.
   289    $UMULH  r10,r5,r5           #in first iteration. No need
   305    addc    r10,r7,r10          # now add to temp result.
   309    $ST     r10,`1*$BNSZ`(r3)   #r[1]=c2;
   315    addze   r10,r0
   323    addze   r10,r10 [all...]
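
ppc.pl is a Perl generator for the bignum assembly; per its own comments, r9/r10/r11 play the roles of the comba accumulators c1/c2/c3, which collect each double-width partial product as low word, high word and carry. A compact C sketch of one accumulation step under that reading, assuming 32-bit words (names are illustrative):

    #include <stdint.h>

    /* Accumulate a*b into the three-word column accumulator c1:c2:c3. */
    static void mul_add_c(uint32_t a, uint32_t b,
                          uint32_t *c1, uint32_t *c2, uint32_t *c3)
    {
        uint64_t t  = (uint64_t)a * b;                      /* low multiply + UMULH          */
        uint64_t lo = (uint64_t)*c1 + (uint32_t)t;          /* add low half into c1 (addc)   */
        uint64_t hi = (uint64_t)*c2 + (uint32_t)(t >> 32)
                    + (lo >> 32);                           /* add high half + carry (adde)  */
        *c1 = (uint32_t)lo;
        *c2 = (uint32_t)hi;
        *c3 += (uint32_t)(hi >> 32);                        /* propagate carry into c3 (addze) */
    }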

/frameworks/base/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/

band_nrg_v5.s
    41    ldrsh   r10, [r1, r2]
    45    cmp     r10, r2
    49    ldr     r11, [r0, +r10, lsl #2]
    50    add     r10, r10, #1
    51    ldr     r6, [r0, +r10, lsl #2]
    53    add     r10, r10, #1
    55    ldr     r11, [r0, +r10, lsl #2]
    57    add     r10, r10, # [all...]
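
Here r10 is loaded from a 16-bit band-offset table (the ldrsh) and then used as the running index into the 32-bit spectral data, stepping one word at a time. Conceptually the routine sums squared spectral values between consecutive band offsets; a schematic C version under that assumption, with the encoder's fixed-point scaling omitted and all names illustrative:

    #include <stdint.h>

    /* Per-band energy: sum of squares of the spectral values in each band.
       bandOffset[] is assumed to have numBands+1 entries. */
    static void band_energy(const int32_t *spectrum, const int16_t *bandOffset,
                            int numBands, int32_t *bandEnergy)
    {
        for (int b = 0; b < numBands; b++) {
            int64_t acc = 0;
            for (int i = bandOffset[b]; i < bandOffset[b + 1]; i++)
                acc += (int64_t)spectrum[i] * spectrum[i];
            bandEnergy[b] = (int32_t)acc;   /* fixed-point scaling left out of this sketch */
        }
    }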

/external/libvpx/vp8/common/arm/armv6/

loopfilter_v6.asm
    76    ldr     r10, [src], pstep   ; p2
    89    uqsub8  r6, r9, r10         ; p3 - p2
    90    uqsub8  r7, r10, r9         ; p2 - p3
    91    uqsub8  r8, r10, r11        ; p2 - p1
    92    uqsub8  r10, r11, r10       ; p1 - p2
    95    orr     r8, r8, r10         ; abs (p2-p1)
   102    ldr     r10, [src], pstep   ; q1
   108    uqsub8  r6, r11, r10        ; p1 - q1
   109    uqsub8  r7, r10, r11        ; q1 - p [all...]
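
The paired uqsub8 instructions followed by orr are the standard trick for a per-byte absolute difference without unpacking: saturate a-b and b-a to zero, then OR them, since one of the two is always zero. Equivalent C for one pair of packed words (the helper name is hypothetical):

    #include <stdint.h>

    /* |a - b| for each of the four bytes packed in a 32-bit word. */
    static uint32_t abs_diff_u8x4(uint32_t a, uint32_t b)
    {
        uint32_t d = 0;
        for (int i = 0; i < 32; i += 8) {
            uint32_t x = (a >> i) & 0xFF, y = (b >> i) & 0xFF;
            d |= (x > y ? x - y : y - x) << i;      /* uqsub8 both ways, then orr */
        }
        return d;
    }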

/external/tremolo/Tremolo/

bitwiseARM.s
    45    STMFD   r13!,{r10,r11,r14}
    53    LDR     r10,[r3]            @ r10= ptr[0]
    57    MOV     r10,r10,LSR r14     @ r10= ptr[0]>>(32-bitsLeftInWord)
    60    ORRLT   r10,r10,r11,LSL r14 @ r10= Next 32 bits.
    63    AND     r0,r10,r1 [all...]
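
The Tremolo bit reader keeps its cursor as a word pointer plus a bitsLeftInWord count; a read that straddles a word boundary shifts the current word down and ORs in the next word shifted up, exactly as the quoted comments describe, then masks to the requested width. A simplified C sketch of that extraction, assuming LSB-first packing and ignoring end-of-stream handling:

    #include <stdint.h>

    /* Peek up to 32 bits starting bit_offset bits into ptr[], LSB-first. */
    static uint32_t look_bits(const uint32_t *ptr, unsigned bit_offset, unsigned n)
    {
        unsigned used = bit_offset & 31;                 /* bits already consumed in the word  */
        const uint32_t *p = ptr + (bit_offset >> 5);
        uint32_t ret = p[0] >> used;                     /* ptr[0] >> (32 - bitsLeftInWord)    */
        if (used && n > 32 - used)
            ret |= p[1] << (32 - used);                  /* OR in the next 32 bits             */
        return n < 32 ? ret & ((1u << n) - 1) : ret;     /* AND with the n-bit mask            */
    }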