/external/libvpx/libvpx/vp8/encoder/arm/armv5te/

vp8_packtokens_armv5.asm
     29  push {r0-r3, r12, lr}    ; rest of regs are preserved by subroutine call
     35  pop {r0-r3, r12, lr}
     46  push {r4-r12, lr}
     79  ; v is kept in r12 during the token pack loop
     80  lsl r12, r6, r4          ; r12 = v << 32 - n
     90  lsls r12, r12, #1        ; bb = v >> n
    168  add r12, r7, r6, lsl #4  ; b = vp8_extra_bits + t
    170  ldr r4, [r12, #vp8_extra_bit_struct_base_val
    [all...]

vp8_packtokens_mbrow_armv5.asm
     29  push {r0-r3, r12, lr}    ; rest of regs are preserved by subroutine call
     35  pop {r0-r3, r12, lr}
     45  push {r4-r12, lr}
    100  ; v is kept in r12 during the token pack loop
    101  lsl r12, r6, r4          ; r12 = v << 32 - n
    111  lsls r12, r12, #1        ; bb = v >> n
    189  add r12, r7, r6, lsl #4  ; b = vp8_extra_bits + t
    191  ldr r4, [r12, #vp8_extra_bit_struct_base_val
    [all...]

vp8_packtokens_partitions_armv5.asm
     28  push {r0-r3, r12, lr}    ; rest of regs are preserved by subroutine call
     34  pop {r0-r3, r12, lr}
     46  push {r4-r12, lr}
    129  ; v is kept in r12 during the token pack loop
    130  lsl r12, r6, r4          ; r12 = v << 32 - n
    140  lsls r12, r12, #1        ; bb = v >> n
    218  add r12, r7, r6, lsl #4  ; b = vp8_extra_bits + t
    220  ldr r4, [r12, #vp8_extra_bit_struct_base_val
    [all...]
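All three packers extract token bits the same way: v is pre-shifted so its n
significant bits sit at the top of a 32-bit register, and each one-bit left
shift pushes the next most-significant bit into the carry flag for the bool
encoder. A minimal C sketch of that walk (the emit_bit callback is
hypothetical; assumes 1 <= n <= 32):

    #include <stdint.h>

    static void pack_bits(uint32_t v, int n,
                          void (*emit_bit)(int bit, void *ctx), void *ctx)
    {
        uint32_t bits = v << (32 - n);         /* lsl r12, r6, r4: v << 32 - n */
        while (n-- > 0) {
            emit_bit((int)(bits >> 31), ctx);  /* lsls r12, r12, #1 -> carry   */
            bits <<= 1;
        }
    }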
/external/openssl/crypto/modes/asm/

ghash-x86_64.S
      9  pushq %r12
     97  pushq %r12
    327  movzbq (%rsp,%rbx,1),%r12
    329  xorq %r8,%r12
    332  movzbq %r12b,%r12
    342  movzwq (%r11,%r12,2),%r12
    347  shlq $48,%r12
    350  xorq %r12,%r9
    365  movzbq (%rsp,%rbx,1),%r12
    [all...]
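The movzwq / shlq $48 / xorq triple is the per-nibble reduction of the classic
4-bit-table GHASH: shifting the 128-bit accumulator right by four drops four
polynomial bits, and a 16-entry table supplies the correction, pre-shifted into
the top of the high word. A sketch of that one step in C (table values as in
OpenSSL's C reference, gcm128.c):

    #include <stdint.h>

    static const uint16_t rem_4bit[16] = {
        0x0000, 0x1C20, 0x3840, 0x2460, 0x7080, 0x6CA0, 0x48C0, 0x54E0,
        0xE100, 0xFD20, 0xD940, 0xC560, 0x9180, 0x8DA0, 0xA9C0, 0xB5E0
    };

    static void ghash_shift_right_4(uint64_t *hi, uint64_t *lo)
    {
        unsigned rem = (unsigned)(*lo & 0xf);  /* the four bits shifted out */
        *lo = (*lo >> 4) | (*hi << 60);
        *hi = (*hi >> 4)
            ^ ((uint64_t)rem_4bit[rem] << 48); /* movzwq + shlq $48 + xorq  */
    }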
/dalvik/vm/compiler/template/armv5te/

TEMPLATE_SAVE_STATE.S
      9  * The handler must save regMap, r0-r12 and then return with r0-r12
     19  stmia r0!, {r1-r12}
TEMPLATE_INVOKE_METHOD_CHAIN.S
     17  add r12, lr, #2    @ setup the punt-to-interp address
     20  bxlo r12           @ return to raise stack overflow excep.
     31  bxne r12           @ bail to the interpreter
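The pattern here: the address two bytes past the return address (with the
Thumb bit set) is the "give up and interpret" target, and every inline check
either falls through to the fast chained call or branches there. A loose
sketch with hypothetical names, not Dalvik's actual API:

    #include <stdint.h>

    typedef void (*punt_fn)(void);

    static void invoke_chained(uintptr_t lr, int stack_ok, int checks_ok)
    {
        punt_fn punt = (punt_fn)(lr + 2);  /* add r12, lr, #2 */
        if (!stack_ok) {
            punt();                        /* bxlo r12: stack overflow path */
            return;
        }
        if (!checks_ok) {
            punt();                        /* bxne r12: bail to interpreter */
            return;
        }
        /* fast path: jump straight into the chained callee */
    }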
/dalvik/vm/compiler/template/armv5te-vfp/

TEMPLATE_SAVE_STATE.S
      9  * The handler must save regMap, r0-r12 and then return with r0-r12
     19  stmia r0!, {r1-r12}
/external/clang/test/CXX/except/except.spec/

p3.cpp
     53  extern void (*r12)() noexcept;         // expected-note {{previous declaration}}
     54  extern void (*r12)() noexcept(false);  // expected-error {{does not match}}
/external/libffi/src/powerpc/

aix.S
     39  .set r12,12
    102  mr r12,r8   // We only need r12 until the call, so it doesn't have to be saved...
    130  lwz r2,4(r12)
    131  lwz r12,0(r12)
    132  mtctr r12   // r12 holds address of _ffi_prep_args
    137  lwz r12,0(r29)
    141  mtctr r12
    [all...]
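Lines 130-132 are the standard AIX indirect call: a 32-bit function pointer
addresses a descriptor rather than code, so the callee's TOC is loaded into r2
from word 1 and the entry point into CTR from word 0. An illustrative layout
(the struct name is mine, not libffi's):

    #include <stdint.h>

    struct aix_func_desc {
        uint32_t entry;  /* lwz r12,0(r12); mtctr r12; bctrl */
        uint32_t toc;    /* lwz r2,4(r12)                    */
        uint32_t env;    /* environment word, unused from C  */
    };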
/external/libvpx/libvpx/vp8/encoder/arm/armv6/

vp8_fast_quantize_b_armv6.asm
     52  ldr r12, [r3], #4        ; [z3 | z2]
     59  ssat16 r11, #1, r12      ; [sz3 | sz2]
     60  eor r12, r12, r11        ; [z3 ^ sz3 | z2 ^ sz2]
     63  ssub16 r12, r12, r11     ; x = (z ^ sz) - sz
     65  sadd16 r12, r12, r10     ; [x3+r3 | x2+r2]
     69  smulbb r10, r12, r9      ; [(x2+r2)*q2]
     70  smultt r12, r12, r9      ; [(x3+r3)*q3
    [all...]
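Lines 59-63 are a branchless absolute value, two 16-bit lanes at a time:
ssat16 #1 saturates each halfword to the range [-1, 0], so sz is 0 for a
non-negative lane and -1 (all ones) for a negative one; XOR-then-subtract is
then one's complement plus one, i.e. negation. Scalar form:

    #include <stdint.h>

    static int16_t abs_branchless(int16_t z)
    {
        int16_t sz = (int16_t)(z >> 15);  /* 0 or -1, like ssat16 #1 per lane */
        return (int16_t)((z ^ sz) - sz);  /* eor + ssub16: x = (z ^ sz) - sz  */
    }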
walsh_v6.asm
     55  smuad r12, r7, lr    ; C0 = a1<<2 + d1<<2
     56  addne r12, r12, #1   ; C0 += (a1!=0)
     58  add r0, r11, r12     ; a1_0 = A0 + C0
     59  sub r11, r11, r12    ; b1_0 = A0 - C0
     62  smuad r12, r5, lr    ; B0 = a1<<2 + d1<<2
     63  addne r12, r12, #1   ; B0 += (a1!=0)
     69  add lr, r12, r2      ; d1_0 = B0 + D0
     70  sub r12, r12, r2     ; c1_0 = B0 - D
    [all...]
/external/llvm/test/MC/MachO/ARM/

thumb2-movw-fixup.s
     12  movw r12, :lower16:L2
     13  movt r12, :upper16:L2
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/

PrePostMDCT_v5.s
     44  smull r10, r12, r7, r8   @ MULHIGH(ti1, cosa)
     50  sub r7, r12, r10         @ MULHIGH(ti1, cosa) - MULHIGH(tr1, sina)
     56  smull r10, r12, r5, r8   @ MULHIGH(ti2, cosa)
     62  sub r9, r12, r10
     98  smull r10, r12, r5, r8   @ MULHIGH(ti1, cosa)
    104  sub r11, r10, r12        @ MULHIGH(ti1, cosa) - MULHIGH(tr1, sina)@
    110  smull r10, r12, r7, r8   @ MULHIGH(ti2, cosa)
    116  sub r5, r10, r12         @ MULHIGH(sinb, tr2) - MULHIGH(cosb, ti2)@
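MULHIGH here is the usual fixed-point idiom: smull leaves the 64-bit product
in a register pair and only the high word is kept, giving (a*b) >> 32. Each
smull/sub pair then forms one component of the complex twiddle applied in the
MDCT pre/post pass. A scalar sketch:

    #include <stdint.h>

    static int32_t mulhigh(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);  /* smull: keep high word */
    }

    /* one rotated component, as in lines 44-50 */
    static int32_t rotate_im(int32_t tr1, int32_t ti1,
                             int32_t cosa, int32_t sina)
    {
        return mulhigh(ti1, cosa) - mulhigh(tr1, sina);
    }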
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/

Syn_filt_32_neon.s
     41  STMFD r13!, {r4 - r12, r14}
     78  MUL r12, r6, r3          @ exc[i] * a0
     97  MOV r14, r12, LSL #1     @ exc[i] * a0 << 1
    118  SUB r12, r11, r10, LSL #12
    119  @MOV r11, r12, ASR #16   @ sig_lo[i]
    120  VDUP.S16 D21, r12
    122  STRH r12, [r5], #2       @ store sig_lo[i]
    129  LDMFD r13!, {r4 - r12, r15}
/system/core/libpixelflinger/

t32cb16blend.S
    136  // r12: scratch
    155  pixel r3, r4, r12, 0
    156  strh r12, [r0], #2
    175  pixel r3, r4, r12, 0
    176  pixel r3, r5, r12, 1
    179  str r12, [r0, #-4]
    190  pixel r3, r4, r12, 0
    191  pixel r3, r5, r12, 16
    192  str r12, [r0, #-4]
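Each "pixel" expansion blends one 32-bit source pixel over a 16-bit RGB565
destination pixel into r12; two results are packed at halfword offsets 0 and
16 so a single str writes both. A rough scalar model (channel order and
rounding are illustrative, not bit-exact pixelflinger):

    #include <stdint.h>

    static uint16_t over_565(uint32_t src, uint16_t dst)
    {
        uint32_t a  = src >> 24;                       /* alpha, 0..255 */
        uint32_t sr = (src >> 19) & 0x1f, dr = (dst >> 11) & 0x1f;
        uint32_t sg = (src >> 10) & 0x3f, dg = (dst >> 5)  & 0x3f;
        uint32_t sb = (src >> 3)  & 0x1f, db =  dst        & 0x1f;
        uint32_t r  = (sr * a + dr * (255 - a) + 127) / 255;
        uint32_t g  = (sg * a + dg * (255 - a) + 127) / 255;
        uint32_t b  = (sb * a + db * (255 - a) + 127) / 255;
        return (uint16_t)((r << 11) | (g << 5) | b);
    }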
/external/tremolo/Tremolo/

floor1LARM.s
     47  MOV r12,r13
     49  LDMFD r12,{r11,r12,r14}   @ r11 = err
     50                            @ r12 = adx
     58  ADDLT r11,r11,r12         @ if (err < 0) err+=adx
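err and adx drive a Bresenham-style accumulator: the floor-curve renderer
steps y each time the error term dips below zero, then pulls it back up by
adx. A minimal sketch of that loop shape (the initial bias and the |dy| > adx
slope handling of the real renderer are elided; assumes |dy| <= adx):

    static void render_line(int x0, int x1, int y0, int y1, int *vec)
    {
        int dy  = y1 - y0;
        int adx = x1 - x0;
        int ady = dy < 0 ? -dy : dy;
        int sy  = dy < 0 ? -1 : 1;
        int err = 0;
        int y   = y0;
        for (int x = x0; x < x1; x++) {
            vec[x] = y;
            err -= ady;
            if (err < 0) {     /* ADDLT: if (err < 0) err += adx */
                err += adx;
                y += sy;
            }
        }
    }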
/external/webrtc/src/modules/audio_coding/codecs/isac/fix/source/

lpc_masking_model_neon.S
     62  mov r12, r7              @ &a_polynomial[i]
     66  ldr r8, [r12], #4
    111  ldrsh r12, [r9]
    112  mul r8, r8, r12
    164  sub r12, r4, #32
    165  add r12, r12, r5, asl #1
    166  add r1, r12, r1          @ add 1st part of shift_internal.
    167  add r12, r1, r2          @ add 2nd part of shift_internal.
    169  add r3, r12, r3          @ value for q_val_residual_energy
    [all...]
/external/libvpx/libvpx/vp8/common/ppc/

filter_bilinear_altivec.asm
     36  li r12, 32
     64  ;# r12 32
    169  oris r12, r11, 0xf830
    170  ori r12, r12, 0xfff8
    171  mtspr 256, r12           ;# set VRSAVE
    178  load_c v10, b_0123_b, 0, r9, r12
    179  load_c v11, b_4567_b, 0, r9, r12
    254  oris r12, r11, 0xf830
    255  ori r12, r12, 0xfff
    [all...]
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/

armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
     16  PUSH {r4-r12,lr}
     18  ADD r12,r0,r1,LSL #2
     21  VLD1.8 {d10,d11},[r12],r1
     23  VLD1.8 {d12,d13},[r12],r1
     31  VLD1.8 {d14,d15},[r12],r1
     33  VLD1.8 {d16,d17},[r12],r1
    115  POP {r4-r12,pc}
omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
     16  PUSH {r4-r12,lr}
     31  LDRH r12,[r4],#4
     32  CMP r12,#0
     60  TST r12,#0xff
     65  TST r12,#0xff00
     71  TST r12,#4
    153  POP {r4-r12,pc}
armVCM4P10_Interpolate_Chroma_s.S
     28  PUSH {r4-r12,lr}
     40  SMULBB r12,r8,r9
     45  VDUP.8 d12,r12
     89  POP {r4-r12,pc}
    112  POP {r4-r12,pc}
    135  POP {r4-r12,pc}
    148  POP {r4-r12,pc}
    159  POP {r4-r12,pc}
    170  POP {r4-r12,pc}
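H.264 chroma interpolation is bilinear with weights built from the fractional
offsets dx and dy; the SMULBB/VDUP.8 pair computes one such weight (a 16x16
multiply of the two offsets) and broadcasts it across a NEON vector. The
scalar formula from the spec, for reference:

    #include <stdint.h>

    /* dx, dy in 0..7; A,B,C,D are the four neighbouring chroma samples */
    static uint8_t chroma_bilinear(uint8_t A, uint8_t B, uint8_t C, uint8_t D,
                                   int dx, int dy)
    {
        int w0 = (8 - dx) * (8 - dy);
        int w1 = dx * (8 - dy);
        int w2 = (8 - dx) * dy;
        int w3 = dx * dy;          /* SMULBB r12, r8, r9 */
        return (uint8_t)((w0 * A + w1 * B + w2 * C + w3 * D + 32) >> 6);
    }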
/external/jpeg/

armv6_idct.S
     36  * 16-bit constants; r12 and r14 hold two of the four arguments; and r8 hold
     77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
     79  // r12 = quans, r14 = coefs.
     83  add r12, r1, #256
     89  ldmdb r12!, {r8, r9, r10, r11}
    121  ldmdb r12!, {r8, r9, r10, r11}
    229  sub r12, r12, #16
    235  // r12 = rows, r14 = col.
    236  ldr r12, [sp, #256
    [all...]
/external/qemu/distrib/jpeg-6b/

armv6_idct.S
     36  * 16-bit constants; r12 and r14 hold two of the four arguments; and r8 hold
     77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
     79  // r12 = quans, r14 = coefs.
     83  add r12, r1, #256
     89  ldmdb r12!, {r8, r9, r10, r11}
    121  ldmdb r12!, {r8, r9, r10, r11}
    229  sub r12, r12, #16
    235  // r12 = rows, r14 = col.
    236  ldr r12, [sp, #256
    [all...]
/external/libvpx/libvpx/vp8/common/arm/armv6/

vp8_sad16x16_armv6.asm
     26  stmfd sp!, {r4-r12, lr}
     48  ldr r12, [r2, #0x8]        ; load 4 ref pixels (1B)
     57  usada8 r4, r10, r12, r4    ; calculate sad for 4 pixels
     73  ldr r12, [r2, #0x8]        ; load 4 ref pixels (2B)
     79  usada8 r4, r10, r12, r4    ; calculate sad for 4 pixels
     91  ldmfd sp!, {r4-r12, pc}
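usada8 is the workhorse here: it takes the absolute difference of four packed
bytes and accumulates the sum into a register, so a 16-pixel row costs four
such instructions. Scalar equivalent of one usada8:

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t usada8(uint32_t acc, const uint8_t *src, const uint8_t *ref)
    {
        for (int i = 0; i < 4; i++)
            acc += (uint32_t)abs(src[i] - ref[i]);  /* sum of |src - ref| */
        return acc;
    }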
/external/libvpx/libvpx/vpx_scale/arm/neon/

vp8_vpxyv12_copy_y_neon.asm
     42  movs r12, r5, lsr #7
     55  subs r12, r12, #1
     99  mov r12, r10
    105  subs r12, r12, #16