/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/
pvmp3_polyphase_filter_window_gcc.s
     71  add r12,r0,r2,lsl #2
     72  ldr r6,[r12,#0x780]
     76  ldr r7,[r12,#0x80]
     91  ldr r6,[r12,#0x680]
     95  ldr r7,[r12,#0x180]
    109  ldr r6,[r12,#0x580]
    113  ldr r7,[r12,#0x280]
    128  ldr r6,[r12,#0x480]
    132  ldr r12,[r12,#0x380 [all...]
pvmp3_mdct_18_gcc.s
     49  add r12,r3,#0x44
     70  ldr r8,[r12],#-4
    106  ldr r12,[r5,#0x14]
    108  str r12,[r5,#0x28]
    109  ldr r12,[r5,#0x10]
    110  str r12,[r5,#0x20]
    111  ldr r12,[r5,#0xc]
    112  str r12,[r5,#0x18]
    113  ldr r12,[r5,#8]
    114  str r12,[r5,#0x10 [all...]
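The paired offsets in pvmp3_polyphase_filter_window_gcc.s (#0x780 with #0x80, #0x680 with #0x180, each pair summing to 0x800 bytes, i.e. word i with word 512 - i) read a coefficient table from both ends at once, the usual shape of a symmetric synthesis-window MAC. A hedged C sketch of that access pattern; names, strides and the final scaling are illustrative, not taken from the pvmp3 source:

    #include <stdint.h>

    /* Accumulate mirrored coefficient pairs: win[512 - i] pairs with
     * win[i], matching load pairs like #0x780/#0x80 in the listing. */
    static int32_t window_mac(const int32_t win[512], const int32_t *v)
    {
        int64_t acc = 0;
        for (int i = 32; i < 256; i += 64) {      /* i = 32, 96, 160, 224 */
            acc += (int64_t)win[512 - i] * v[0];  /* ldr r6,[r12,#0x780]  */
            acc += (int64_t)win[i] * v[1];        /* ldr r7,[r12,#0x80]   */
            v += 2;
        }
        return (int32_t)(acc >> 32);              /* keep the high word */
    }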
/external/linux-tools-perf/perf-3.12.0/arch/avr32/lib/ |
memset.S
     16  * r12: void *b
     20  * Returns b in r12
     27  mov r9, r12
     28  mov r8, r12
     47  reteq r12
     57  retcc r12
     59  retal r12
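The comment matches (* r12: void *b, * Returns b in r12) record the AVR32 ABI detail this file leans on: the first argument and the return value share r12, so each ret* conditional return hands back the original buffer for free. A C sketch of the usual fill strategy such a memset follows, not a transcription of the AVR32 code:

    #include <stddef.h>
    #include <stdint.h>

    static void *memset_sketch(void *b, int c, size_t n)
    {
        uint8_t *p = (uint8_t *)b;
        uint32_t w = (uint8_t)c * 0x01010101u;   /* splat byte to word */

        while (n && ((uintptr_t)p & 3)) { *p++ = (uint8_t)c; n--; } /* align */
        for (; n >= 4; n -= 4) { *(uint32_t *)p = w; p += 4; }      /* words */
        while (n--) *p++ = (uint8_t)c;                              /* tail  */
        return b;   /* the asm never moves b out of r12, so this is free */
    }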
/external/openssl/crypto/bn/asm/ |
ppc.pl
    235  #.set r12,12
    841  # r10, r11, r12 are the equivalents of c1, c2, and c3.
    855  adde r12,r9,r0
    863  adde r12,r9,r12
    870  addc r12,r8,r12
    878  addc r12,r8,r12
    886  addc r12,r8,r1 [all...]
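Line 841's comment is the key to the rest: r10/r11/r12 play the roles of c1/c2/c3, the three-word column accumulator of a comba-style multiply, and the addc/adde chain ripples carries through it. A C sketch of one column step under that reading (names illustrative):

    #include <stdint.h>

    typedef struct { uint32_t c1, c2, c3; } acc3;   /* r10, r11, r12 */

    /* Fold one partial product a*b into the running 3-word column sum. */
    static void mul_add_c(uint32_t a, uint32_t b, acc3 *s)
    {
        uint64_t t = (uint64_t)a * b;
        uint32_t lo = (uint32_t)t, hi = (uint32_t)(t >> 32);

        s->c1 += lo;
        hi    += (s->c1 < lo);    /* carry out of the low word (addc)  */
        s->c2 += hi;
        s->c3 += (s->c2 < hi);    /* carry into the top word (adde)    */
    }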
armv4-mont.S
     39  stmdb sp!,{r4-r12,lr} @ save 10 registers
     58  mov r12,#0
     59  umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
     69  umlal r12,r14,r6,r8 @ np[j]*n0
     70  adds r12,r12,r10
     71  str r12,[r4],#4 @ tp[j-1]=,tp++
     72  adc r12,r14,#0
     76  adds r12,r12,r1 [all...]
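The annotated sequence is the classic word-serial Montgomery inner loop: umlal accumulates np[j]*n0 into a 64-bit register pair, the running tp word is added, the low half is stored (str ... @ tp[j-1]=,tp++) and the high half becomes the carry (adc). A C sketch of one such pass; the indexing is simplified, whereas the real loop stores one word behind, as its comment notes:

    #include <stdint.h>

    static uint32_t mont_pass(uint32_t *tp, const uint32_t *np,
                              uint32_t m, int num)
    {
        uint32_t carry = 0;                      /* mov r12,#0        */
        for (int j = 0; j < num; j++) {
            uint64_t t = (uint64_t)np[j] * m     /* umlal r12,r14,... */
                       + tp[j] + carry;          /* adds r12,r12,r10  */
            tp[j] = (uint32_t)t;                 /* str r12,[r4],#4   */
            carry = (uint32_t)(t >> 32);         /* adc r12,r14,#0    */
        }
        return carry;   /* folded into the top of tp by the caller */
    }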
/external/libhevc/common/arm/ |
ihevc_inter_pred_chroma_horz.s
    108  stmfd sp!, {r4-r12, r14} @stack stores the values of the arguments
    121  sub r12,r0,#2 @pu1_src - 2
    123  add r4,r12,r2 @pu1_src_tmp2_8 = pu1_src + src_strd
    152  add r4,r12,r2
    154  and r0, r12, #31
    156  pld [r12, r2, lsl #1]
    161  vld1.u32 {q0},[r12],r11 @vector load pu1_src
    163  vld1.u32 {q1},[r12],r11 @vector load pu1_src
    165  vld1.u32 {q2},[r12],r11 @vector load pu1_src
    167  vld1.u32 {q3},[r12],r9 @vector load pu1_sr [all...]
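The loop rewinds to pu1_src - 2 (one interleaved chroma pixel to the left) and streams vector loads through the filter, which fits HEVC's 4-tap chroma interpolation. A scalar C sketch of that filter; the rounding constant and shift are the generic 8-bit HEVC form, not values read from this file:

    #include <stdint.h>

    /* src must have one pixel of left margin (the pu1_src rewind). */
    static void interp_horz_4tap(const uint8_t *src, uint8_t *dst,
                                 const int8_t coeff[4], int wd)
    {
        for (int x = 0; x < wd; x++) {
            int sum = 0;
            for (int k = 0; k < 4; k++)
                sum += coeff[k] * src[x + k - 1];
            sum = (sum + 32) >> 6;            /* round, drop filter gain */
            dst[x] = (uint8_t)(sum < 0 ? 0 : sum > 255 ? 255 : sum);
        }
    }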
ihevc_intra_pred_luma_horz.s
     99  stmfd sp!, {r4-r12, r14} @stack stores the values of the arguments
    106  add r12,r0,r6 @*pu1_ref[two_nt]
    115  sub r12,r12,#16 @move to 16th value pointer
    119  vld1.8 {q0},[r12] @load 16 values. d1[7] will have the 1st value.
    175  sub r12,r12,#16 @move to 16th value pointer
    188  ldmfd sp!,{r4-r12,r15} @reload the registers from sp
    192  ldrb lr,[r12],#1 @pu1_ref[two_nt]
    193  vld1.8 {q15},[r12] @pu1_ref[two_nt + 1 + col [all...]
ihevc_sao_edge_offset_class0_chroma.s
     75  STMFD sp!, {r4-r12, r14} @stack stores the values of the arguments
     83  LDRH r12,[r11,#-2] @pu1_src_top[wd - 1]
     87  STRH r12,[r4] @*pu1_src_top_left = pu1_src_top[wd - 1]
    105  MOV r12,r9 @Move wd to r12 for loop count
    109  SUBS r12,r12,#8 @Decrement the loop counter by 8
    125  LDRB r12,[r7] @pu1_avail[0]
    126  VMOV.8 D8[0],r12 @vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
    127  VMOV.8 D8[1],r12 @vsetq_lane_s8(pu1_avail[0], au1_mask, 1 [all...]
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/ |
pred_lt4_1_opt.s
     39  STMFD r13!, {r4 - r12, r14}
     71  SMULBB r12, r9, r3 @x[2] * h[0]
     76  SMLABT r12, r4, r3, r12 @x[3] * h[1]
     82  SMLABB r12, r6, r3, r12 @x[4] * h[2]
     87  SMLABT r12, r9, r3, r12 @x[5] * h[3]
     93  SMLABB r12, r4, r3, r12 @x[6] * h[4 [all...]
Norm_Corr_opt.s
     53  STMFD r13!, {r4 - r12, r14}
     80  LDR r12, [r14], #4
     87  SMLABB r6, r12, r12, r6
     88  SMLATT r6, r12, r12, r6
    107  MOV r12, r1 @copy of xn[]
    113  LDR r10, [r12], #4 @load xn[i], xn[i+1]
    119  LDR r10, [r12], #4 @load xn[i+2], xn[i+3]
    153  MOV r12, r [all...]
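The LDR plus SMLABB/SMLATT pair is the packed-halfword energy idiom: one 32-bit load brings in xn[i] and xn[i+1], then each 16-bit half is squared into the accumulator. The C equivalent:

    #include <stdint.h>

    static int32_t energy_packed(const int16_t *xn, int n)   /* n even */
    {
        int32_t acc = 0;
        for (int i = 0; i < n; i += 2) {       /* one LDR per pair    */
            acc += xn[i]     * xn[i];          /* SMLABB: low * low   */
            acc += xn[i + 1] * xn[i + 1];      /* SMLATT: high * high */
        }
        return acc;
    }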
/art/runtime/arch/arm/ |
jni_entrypoints_arm.S
     34  mov r12, r0 @ save result in r12
     45  bx r12 @ if non-null, tail call to method's code
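These two matches are the whole story of the stub: the lookup result is parked in r12 (ARM's intra-procedure-call scratch register, so it survives until the branch) and, if non-null, execution jumps to it rather than returning. A C sketch of that control flow; the find parameter stands in for the runtime's lookup and is purely illustrative:

    typedef void (*code_ptr)(void);

    static void jni_lookup_stub_sketch(code_ptr (*find)(void))
    {
        code_ptr target = find();   /* mov r12, r0: save result      */
        if (target)
            target();               /* bx r12: tail call in the stub */
    }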
/external/chromium_org/third_party/boringssl/linux-arm/crypto/bn/ |
armv4-mont.S
     40  stmdb sp!,{r4-r12,lr} @ save 10 registers
     59  mov r12,#0
     60  umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
     70  umlal r12,r14,r6,r8 @ np[j]*n0
     71  adds r12,r12,r10
     72  str r12,[r4],#4 @ tp[j-1]=,tp++
     73  adc r12,r14,#0
     77  adds r12,r12,r1 [all...]
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/ |
band_nrg_v5.s
     32  ldr r12, [r13, #36]
     78  str r9, [r12, #0]
     89  mov r12, #0
    109  str r12, [r13, #12]
    119  ldr r12, [r0, +r10, lsl #2]
    125  mov r12, r12, asr #1
    130  add r5, r12, r14
    132  sub r8, r12, r14
    147  ldr r12, [r0, +r10, lsl #2 [all...]
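The asr #1 pre-shift followed by a paired add/sub of the same two values reads like the mid/side variant of band-energy computation: accumulate squares of (l + r)/2 and (l - r)/2 per band. A hedged C sketch under that interpretation; names and normalization are illustrative:

    #include <stdint.h>

    static void band_energy_ms(const int32_t *l, const int32_t *r,
                               int start, int stop,
                               int64_t *mid, int64_t *side)
    {
        int64_t em = 0, es = 0;
        for (int i = start; i < stop; i++) {
            int32_t a = l[i] >> 1, b = r[i] >> 1;  /* mov r12,r12,asr #1 */
            int32_t m = a + b, s = a - b;          /* add r5 / sub r8    */
            em += (int64_t)m * m;
            es += (int64_t)s * s;
        }
        *mid = em;
        *side = es;
    }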
/external/openssl/crypto/sha/asm/ |
sha256-armv4.S
     37  ldr r12,.LOPENSSL_armcap
     38  ldr r12,[r3,r12] @ OPENSSL_armcap_P
     39  tst r12,#ARMV8_SHA256
     41  tst r12,#ARMV7_NEON
     55  eor r12,r12,r12
     62  add r4,r4,r12 @ h+=Maj(a,b,c) from the past
     67  add r4,r4,r12 @ h+=Maj(a,b,c) from the pas [all...]
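Two things are visible here: the prologue fetches the OPENSSL_armcap_P bitmask and tests feature bits to choose a hardware path, and the round comments note that Maj(a,b,c) is added "from the past", i.e. deferred from the previous round for better scheduling. A C sketch of both; the bit values follow OpenSSL's arm_arch.h as remembered, so treat them as assumptions:

    #include <stdint.h>

    static inline uint32_t Maj(uint32_t a, uint32_t b, uint32_t c)
    {
        return (a & b) ^ (a & c) ^ (b & c);    /* per-bit majority vote */
    }

    extern uint32_t OPENSSL_armcap_P;          /* runtime capability mask */
    #define ARMV7_NEON   (1 << 0)              /* assumed bit positions   */
    #define ARMV8_SHA256 (1 << 4)

    static int sha256_path(void)
    {
        if (OPENSSL_armcap_P & ARMV8_SHA256) return 2;  /* SHA-256 insns */
        if (OPENSSL_armcap_P & ARMV7_NEON)   return 1;  /* NEON loop     */
        return 0;                                       /* scalar ARM    */
    }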
/external/chromium_org/third_party/boringssl/linux-arm/crypto/sha/ |
sha256-armv4.S
     39  ldr r12,.LOPENSSL_armcap
     40  ldr r12,[r3,r12] @ OPENSSL_armcap_P
     41  tst r12,#1
     55  eor r12,r12,r12
     62  add r4,r4,r12 @ h+=Maj(a,b,c) from the past
     67  add r4,r4,r12 @ h+=Maj(a,b,c) from the past
     68  ldrb r12,[r1,#2 [all...]
/bionic/libc/arch-arm/generic/bionic/ |
memcpy.S
     82  movs r12, r3, lsl #31
     86  ldrcsb r12,[r1], #1
     89  strcsb r12,[r0], #1
     94  eor r12, r0, r1
     95  tst r12, #3
    111  movs r12, r3, lsl #28
    149  bic r12, r1, #0x1F
    150  add r12, r12, #64
    153  pld [r12, #64 [all...]
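Two idioms show up in the matches: movs r12, r3, lsl #31 shifts the count's lowest bits into the flags so the odd tail bytes can be copied under condition codes (the ldrcsb/strcsb pair), and eor r12, r0, r1 / tst r12, #3 asks whether source and destination can reach word alignment together; the bic/add/pld lines prefetch ahead on cache-line boundaries. A C sketch of that overall structure:

    #include <stddef.h>
    #include <stdint.h>

    static void memcpy_sketch(uint8_t *dst, const uint8_t *src, size_t n)
    {
        if ((((uintptr_t)dst ^ (uintptr_t)src) & 3) == 0) {  /* eor/tst */
            while (n && ((uintptr_t)dst & 3)) { *dst++ = *src++; n--; }
            for (; n >= 4; n -= 4) {           /* aligned word main loop */
                *(uint32_t *)dst = *(const uint32_t *)src;
                dst += 4; src += 4;
            }
        }
        while (n--) *dst++ = *src++;   /* unaligned case and tail bytes */
    }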
/external/chromium_org/third_party/boringssl/linux-arm/crypto/aes/ |
aes-armv4.S
    171  stmdb sp!,{r1,r4-r12,lr}
    172  mov r12,r0 @ inp
    176  ldrb r0,[r12,#3] @ load input data in endian-neutral
    177  ldrb r4,[r12,#2] @ manner...
    178  ldrb r5,[r12,#1]
    179  ldrb r6,[r12,#0]
    181  ldrb r1,[r12,#7]
    183  ldrb r4,[r12,#6]
    185  ldrb r5,[r12,#5]
    186  ldrb r6,[r12,#4 [all...]
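The run of ldrb loads assembles each input word a byte at a time, so the same big-endian value is produced regardless of the CPU's byte order, which is exactly what the "endian-neutral" comment means. The C equivalent of one such load:

    #include <stdint.h>

    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }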
/external/openssl/crypto/aes/asm/ |
aes-armv4.S
    169  stmdb sp!,{r1,r4-r12,lr}
    170  mov r12,r0 @ inp
    174  ldrb r0,[r12,#3] @ load input data in endian-neutral
    175  ldrb r4,[r12,#2] @ manner...
    176  ldrb r5,[r12,#1]
    177  ldrb r6,[r12,#0]
    179  ldrb r1,[r12,#7]
    181  ldrb r4,[r12,#6]
    183  ldrb r5,[r12,#5]
    184  ldrb r6,[r12,#4 [all...]
/external/tremolo/Tremolo/ |
bitwiseARM.s
     46  LDMIA r0,{r2,r3,r12}
     49  @ r12= bitsLeftInWord
     54  RSB r14,r12,#32 @ r14= 32-bitsLeftInWord
     55  SUBS r12,r12,r1 @ r12= bitsLeftInWord -= bits
     58  ADDLE r12,r12,#32 @ r12= bitsLeftInWord += 32
     71  @ r12 = bitsLeftInWor [all...]
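The comments name the reader's state: a cursor into a word stream plus bitsLeftInWord, with RSB r14,r12,#32 forming the complementary count and SUBS/ADDLE rolling the counter across word boundaries. A C sketch of a read under that bookkeeping, assuming Ogg's LSB-first bit packing:

    #include <stdint.h>

    typedef struct {
        const uint32_t *ptr;       /* current word in the stream */
        uint32_t bitsLeftInWord;   /* unconsumed bits in *ptr    */
    } bitread;

    static uint32_t read_bits(bitread *br, unsigned bits)  /* 1..32 */
    {
        uint32_t used = 32 - br->bitsLeftInWord;           /* RSB        */
        uint64_t v = br->ptr[0] >> used;                   /* this word  */
        if (bits > br->bitsLeftInWord)                     /* splice in  */
            v |= (uint64_t)br->ptr[1] << br->bitsLeftInWord;
        if (bits >= br->bitsLeftInWord) {                  /* SUBS/ADDLE */
            br->ptr++;
            br->bitsLeftInWord += 32;
        }
        br->bitsLeftInWord -= bits;
        return (uint32_t)(v & (bits == 32 ? 0xFFFFFFFFu
                                          : (1u << bits) - 1));
    }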
/hardware/samsung_slsi/exynos5/libswconverter/ |
csc_tiled_to_linear_y_neon.s
     67  @r12 temp1
     70  stmfd sp!, {r4-r12,r14} @ backup registers
     91  mul r12, r2, r5 @ temp1 = width * i + j;
     94  add r12, r12, r6
     96  add r7, r0, r12 @ dst_offset = y_dst + temp1
    129  mul r12, r2, r5 @ temp1 = width * i + j;
    130  add r12, r12, r6
    131  add r7, r0, r12 @ r7 = y_dst + temp [all...]
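The addressing is spelled out by the comments: temp1 = width * i + j, and the destination pointer is y_dst + temp1, so each tile row is copied to its linear position. A C sketch with stand-in tile dimensions (the real kernel moves whole NEON rows):

    #include <stdint.h>
    #include <string.h>

    enum { TILE_W = 16, TILE_H = 16 };   /* illustrative tile size */

    static void tile_to_linear(uint8_t *y_dst, const uint8_t *tile,
                               int width, int i, int j)
    {
        for (int row = 0; row < TILE_H; row++) {
            int temp1 = width * (i + row) + j;   /* mul r12,r2,r5; add */
            memcpy(y_dst + temp1, tile, TILE_W); /* r7 = y_dst + temp1 */
            tile += TILE_W;
        }
    }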
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/arm/armv6/ |
idct_v6.asm
     39  ldr r12, [r0, #(12*2)] ; i13|i12
     47  smulbt r11, r5, r12 ; (ip[13] * cospi8sqrt2minus1) >> 16
     52  smulwt r7, r4, r12 ; (ip[13] * sinpi8sqrt2) >> 16
     53  smulbb r9, r5, r12 ; (ip[12] * cospi8sqrt2minus1) >> 16
     54  smulwb r10, r4, r12 ; (ip[12] * sinpi8sqrt2) >> 16
     61  uadd16 r7, r12, r9 ; 13c+13 | 12c+12
     87  ldr r12,[r0, #(2*2)] ; i3 | i2
    102  pkhbt r9, r14, r12, lsl #16 ; i2 | i6
    105  pkhtb r6, r12, r14, asr #16 ; i3 | i7
    111  smulbb r12, r5, r6 ; (ip[7] * cospi8sqrt2minus1) >> 1 [all...]
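The smulw*/smulb* lines implement VP8's fixed-point IDCT constants: sinpi8sqrt2 and cospi8sqrt2minus1 are Q16 approximations, smulw* returns (x * c) >> 16 directly, and the minus-one form keeps the multiplier inside 16 bits, with x added back separately. In C:

    #include <stdint.h>

    #define sinpi8sqrt2       35468  /* sqrt(2)*sin(pi/8) in Q16     */
    #define cospi8sqrt2minus1 20091  /* sqrt(2)*cos(pi/8) - 1 in Q16 */

    static int32_t mul_sin(int32_t x)   /* smulwb/smulwt equivalent */
    {
        return (int32_t)(((int64_t)x * sinpi8sqrt2) >> 16);
    }

    static int32_t mul_cos(int32_t x)   /* x*sqrt(2)*cos(pi/8)      */
    {
        return x + (int32_t)(((int64_t)x * cospi8sqrt2minus1) >> 16);
    }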
/external/libvpx/libvpx/vp8/common/arm/armv6/ |
idct_v6.asm
     39  ldr r12, [r0, #(12*2)] ; i13|i12
     47  smulbt r11, r5, r12 ; (ip[13] * cospi8sqrt2minus1) >> 16
     52  smulwt r7, r4, r12 ; (ip[13] * sinpi8sqrt2) >> 16
     53  smulbb r9, r5, r12 ; (ip[12] * cospi8sqrt2minus1) >> 16
     54  smulwb r10, r4, r12 ; (ip[12] * sinpi8sqrt2) >> 16
     61  uadd16 r7, r12, r9 ; 13c+13 | 12c+12
     87  ldr r12,[r0, #(2*2)] ; i3 | i2
    102  pkhbt r9, r14, r12, lsl #16 ; i2 | i6
    105  pkhtb r6, r12, r14, asr #16 ; i3 | i7
    111  smulbb r12, r5, r6 ; (ip[7] * cospi8sqrt2minus1) >> 1 [all...]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/ |
idct_v6.asm
     39  ldr r12, [r0, #(12*2)] ; i13|i12
     47  smulbt r11, r5, r12 ; (ip[13] * cospi8sqrt2minus1) >> 16
     52  smulwt r7, r4, r12 ; (ip[13] * sinpi8sqrt2) >> 16
     53  smulbb r9, r5, r12 ; (ip[12] * cospi8sqrt2minus1) >> 16
     54  smulwb r10, r4, r12 ; (ip[12] * sinpi8sqrt2) >> 16
     61  uadd16 r7, r12, r9 ; 13c+13 | 12c+12
     87  ldr r12,[r0, #(2*2)] ; i3 | i2
    102  pkhbt r9, r14, r12, lsl #16 ; i2 | i6
    105  pkhtb r6, r12, r14, asr #16 ; i3 | i7
    111  smulbb r12, r5, r6 ; (ip[7] * cospi8sqrt2minus1) >> 1 [all...]
/system/core/libpixelflinger/ |
col32cb16blend.S
     47  and r12, r9, r1, lsr #8 // extract green
     50  mov r12, r12, lsl #6 // prescale green
     62  smlabb r7, r7, r5, r12 // dest green * alpha + src green
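The three matches show one channel of a 32-bit-over-RGB565 blend: the 8-bit source green is extracted, prescaled by lsl #6 so it sits at the same fixed-point position as the product, and a single smlabb then forms dest green * alpha + src green. A C sketch with explicit weights; the exact scaling in the file differs, so this is illustrative only:

    #include <stdint.h>

    /* f is the destination weight, 0..256; the source gets 256 - f. */
    static uint16_t blend_green(uint32_t src8888, uint16_t dst565, uint32_t f)
    {
        uint32_t sg = (src8888 >> 8) & 0xFF;   /* and r12,r9,r1,lsr #8    */
        uint32_t dg = (dst565 >> 5) & 0x3F;    /* 6-bit destination green */
        uint32_t g  = (dg * f + (sg >> 2) * (256 - f)) >> 8;  /* smlabb  */
        return (uint16_t)((dst565 & ~0x07E0u) | (g << 5));
    }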
rotate90CW_4x4_16v6.S
     38  add r12, r2, r2
     47  strd r10, r11, [r0], r12
     52  strd r10, r11, [r0], r12
     56  strd r10, r11, [r0], r12
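add r12, r2, r2 turns a 16-bit-pixel stride into bytes, and each strd then writes two registers (four 565 pixels, one rotated row) before bumping the destination by that stride. A C sketch of the underlying 4x4 clockwise rotation:

    #include <stdint.h>

    /* Strides are in pixels; the asm doubles them to bytes itself. */
    static void rotate90cw_4x4(uint16_t *dst, int dst_stride,
                               const uint16_t *src, int src_stride)
    {
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)          /* (y,x) -> (x, 3-y) */
                dst[x * dst_stride + (3 - y)] = src[y * src_stride + x];
    }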