    Searched refs:LSL (Results 1 - 25 of 269)

  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/aarch64/
illegal-2.s 13 add wsp, w0, #0xfff0, LSL #12
14 add wsp, w0, #0xfff0, LSL #0
15 add wsp, w0, u16, LSL #12
16 add wsp, w0, u16, LSL #0
movi.s 70 // MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}
73 all_8bit_imm_movi_sft v7.\T, 0, 63, LSL, \amount
74 all_8bit_imm_movi_sft v7.\T, 64, 127, LSL, \amount
75 all_8bit_imm_movi_sft v7.\T, 128, 191, LSL, \amount
76 all_8bit_imm_movi_sft v7.\T, 192, 255, LSL, \amount
86 // MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}
89 all_8bit_imm_movi_sft v7.\T, 0, 63, LSL, \amount
90 all_8bit_imm_movi_sft v7.\T, 64, 127, LSL, \amount
91 all_8bit_imm_movi_sft v7.\T, 128, 191, LSL, \amount
92 all_8bit_imm_movi_sft v7.\T, 192, 255, LSL, \amoun
    [all...]
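
Note: the illegal-2.s lines exercise the AArch64 ADD (immediate) form, whose 12-bit immediate may optionally be shifted with LSL #12; #0xfff0 fits neither encoding, so the assembler has to reject it. A minimal C sketch of that encodability check, assuming this reading of the test (the helper name is illustrative, not from binutils):

    #include <stdbool.h>
    #include <stdint.h>

    /* AArch64 ADD/SUB (immediate): a 12-bit immediate, optionally
     * shifted left by 12 (the "LSL #12" form). True if the value can
     * be encoded either way. */
    static bool add_imm_encodable(uint64_t v)
    {
        if ((v & ~0xfffULL) == 0)        /* imm12, LSL #0  */
            return true;
        if ((v & ~0xfff000ULL) == 0)     /* imm12, LSL #12 */
            return true;
        return false;
    }

    /* 0xfff0 needs 16 significant bits and has a non-zero low nibble,
     * so add_imm_encodable(0xfff0) is false, matching the test. */
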
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/arm/
addthumb2err.l 2 [^:]*:9: Error: shift value over 3 not allowed in thumb mode -- `add sp,sp,r0,LSL#4'
3 [^:]*:10: Error: only LSL shift allowed in thumb mode -- `add sp,sp,r0,LSR#3'
4 [^:]*:11: Error: only LSL shift allowed in thumb mode -- `add sp,sp,r0,ASR#3'
5 [^:]*:12: Error: only LSL shift allowed in thumb mode -- `add sp,sp,r0,ROR#3'
6 [^:]*:13: Error: only LSL shift allowed in thumb mode -- `add sp,sp,r0,RRX'
7 [^:]*:14: Error: shift value over 3 not allowed in thumb mode -- `adds sp,sp,r0,LSL#4'
8 [^:]*:15: Error: only LSL shift allowed in thumb mode -- `adds sp,sp,r0,LSR#3'
9 [^:]*:16: Error: only LSL shift allowed in thumb mode -- `adds sp,sp,r0,ASR#3'
10 [^:]*:17: Error: only LSL shift allowed in thumb mode -- `adds sp,sp,r0,ROR#3'
11 [^:]*:18: Error: only LSL shift allowed in thumb mode -- `adds sp,sp,r0,RRX
    [all...]
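
Note: the gas error messages above state the Thumb-mode constraint directly: for add/adds with SP, only an LSL shift is accepted and its amount must not exceed 3. A hedged C sketch of the same check (enum and function names are illustrative):

    #include <stdbool.h>

    enum shift_kind { SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX };

    /* Mirror of the constraint the errors describe: in Thumb mode,
     * "add sp, sp, rN, <shift>" only accepts LSL #0..#3. */
    static bool thumb_add_sp_shift_ok(enum shift_kind kind, unsigned amount)
    {
        return kind == SHIFT_LSL && amount <= 3;
    }
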
sp-pc-validations-bad.s 11 ldr r0,[r1,pc, LSL #2] @ Unpredictable
12 ldr r0,[r1,pc, LSL #2]! @ ditto
13 ldr r0,[r1],pc, LSL #2 @ ditto
14 ldr r0,[pc,r1, LSL #2]! @ ditto
15 ldr r0,[pc],r1, LSL #2 @ ditto
27 ldrb pc,[r0,r1, LSL #2] @ Unpredictable
28 ldrb pc,[r0,r1, LSL #2]! @ ditto
29 ldrb pc,[r0],r1, LSL #2 @ ditto
30 ldrb r0,[r1,pc, LSL #2] @ ditto
31 ldrb r0,[r1,pc, LSL #2]! @ ditt
    [all...]
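
Note: the scaled register-offset form used in these hits, ldr r0,[r1,r2,LSL #2], loads from r1 + (r2 << 2); the test's point is that substituting pc or sp in those positions is Unpredictable. A rough C analogue of the address arithmetic for the basic pre-indexed form (purely illustrative):

    #include <stdint.h>

    /* Address computed by LDR Rt, [Rn, Rm, LSL #2]: base plus a
     * word-scaled index, i.e. the usual &base[index] for 32-bit data. */
    static uint32_t load_scaled(const uint32_t *base, uint32_t index)
    {
        return base[index];   /* same as *(base + ((uintptr_t)index)) with a << 2 byte offset */
    }
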
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
Deemph_32_opt.s 44 MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
47 ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
48 MOV r10, r12, LSL #3 @L_tmp <<= 3
53 MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
58 MOV r10, r6, LSL #16
59 ADD r12, r10, r7, LSL #4
61 MOV r10, r12, LSL #3
64 MOV r12, r10, LSL #1
72 MOV r10, r6, LSL #16
73 ADD r12, r10, r7, LSL #
    [all...]
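
Note: the @-comments in Deemph_32_opt.s spell out how each 32-bit value is rebuilt from split 16-bit halves before deemphasis: L_tmp = (x_hi << 16) + (x_lo << 4), then L_tmp <<= 3. A hedged C restatement of just that arithmetic, with variable names taken from the comments and the rest of the routine omitted:

    #include <stdint.h>

    /* Reassemble one 32-bit sample from its high/low 16-bit parts,
     * following the comments: (hi << 16) + (lo << 4), then << 3. */
    static int32_t deemph_reassemble(int16_t x_hi, int16_t x_lo)
    {
        int32_t L_tmp = (int32_t)x_hi << 16;   /* MOV r10, r6, LSL #16     */
        L_tmp += (int32_t)x_lo << 4;           /* ADD r12, r10, r7, LSL #4 */
        return L_tmp << 3;                     /* MOV r10, r12, LSL #3     */
    }
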
cor_h_vec_opt.s 41 ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
49 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
63 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
66 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
71 ADD r9, r3, r2, LSL #1 @address of sign[pos]
83 ADD r9, r9, r4, LSL #1
84 ADD r12, r12, r4, LSL #1
94 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
109 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
112 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
Deemph_32_neon.s 44 MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
47 ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
48 MOV r10, r12, LSL #3 @L_tmp <<= 3
53 MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
58 MOV r10, r6, LSL #16
59 ADD r12, r10, r7, LSL #4
61 MOV r10, r12, LSL #3
64 MOV r12, r10, LSL #1
72 MOV r10, r6, LSL #16
73 ADD r12, r10, r7, LSL #
    [all...]
cor_h_vec_neon.s 41 ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
50 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
64 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
67 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
72 ADD r9, r3, r2, LSL #1 @address of sign[pos]
84 ADD r9, r9, r4, LSL #1
85 ADD r12, r12, r4, LSL #1
95 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
110 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
113 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/
armCOMM_BitDec_s.h 148 ORR $T2, $T2, $T1, LSL #8
149 ORR $BitBuffer, $T2, $BitBuffer, LSL #16
174 MOV $Symbol, $BitBuffer, LSL $BitCount
199 MOV $Symbol, $BitBuffer, LSL $BitCount
225 ORRCS $BitBuffer, $T1, $BitBuffer, LSL #8
250 MOVS $Symbol, $BitBuffer, LSL $BitCount
255 ORRCS $BitBuffer, $T1, $BitBuffer, LSL #8
286 MOVS $Symbol, $BitBuffer, LSL $BitCount
287 ORR $BitBuffer, $T1, $BitBuffer, LSL #8
292 ORRCS $BitBuffer, $T1, $BitBuffer, LSL #
    [all...]
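
Note: judging from the macro parameter names, armCOMM_BitDec_s.h appears to use LSL both to refill the bit buffer a byte at a time and to push already-consumed bits off the top so the next bits of interest are MSB-aligned. A minimal C sketch of that apparent pattern, not the macro interface itself:

    #include <stdint.h>

    /* ORR $BitBuffer, $T1, $BitBuffer, LSL #8 :
     * make room for one more byte and merge it in. */
    static uint32_t refill_byte(uint32_t bitbuffer, uint8_t next_byte)
    {
        return (bitbuffer << 8) | next_byte;
    }

    /* MOV $Symbol, $BitBuffer, LSL $BitCount :
     * discard the BitCount bits already consumed so the remaining
     * bits sit at the top of the word. */
    static uint32_t align_next_bits(uint32_t bitbuffer, unsigned bitcount)
    {
        return bitbuffer << bitcount;
    }
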
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/
armCOMM_BitDec_s.h 148 ORR $T2, $T2, $T1, LSL #8
149 ORR $BitBuffer, $T2, $BitBuffer, LSL #16
174 MOV $Symbol, $BitBuffer, LSL $BitCount
199 MOV $Symbol, $BitBuffer, LSL $BitCount
225 ORRCS $BitBuffer, $T1, $BitBuffer, LSL #8
250 MOVS $Symbol, $BitBuffer, LSL $BitCount
255 ORRCS $BitBuffer, $T1, $BitBuffer, LSL #8
286 MOVS $Symbol, $BitBuffer, LSL $BitCount
287 ORR $BitBuffer, $T1, $BitBuffer, LSL #8
292 ORRCS $BitBuffer, $T1, $BitBuffer, LSL #
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_Copy_unsafe_s.S 33 ADD pc,pc,r12,LSL #2
55 ORR r4,r4,r5,LSL #24
58 ORR r8,r8,r9,LSL #24
65 ORR r4,r4,r5,LSL #24
68 ORR r8,r8,r9,LSL #24
77 ORR r4,r4,r5,LSL #16
80 ORR r8,r8,r9,LSL #16
87 ORR r4,r4,r5,LSL #16
90 ORR r8,r8,r9,LSL #16
99 ORR r4,r4,r5,LSL #
    [all...]
armVCM4P10_InterpolateLuma_Align_unsafe_s.S 33 ADD pc,pc,r7,LSL #2
51 ORR r7,r7,r10,LSL #24
53 ORR r10,r10,r11,LSL #24
63 ORR r7,r7,r10,LSL #16
65 ORR r10,r10,r11,LSL #16
75 ORR r7,r7,r10,LSL #8
77 ORR r10,r10,r11,LSL #8
90 ADD pc,pc,r7,LSL #2
106 LSL r10,r10,#24
115 LSL r10,r10,#1
    [all...]
omxVCM4P10_InterpolateLuma_s.S 37 ADD r6,r6,r7,LSL #2
43 ADD pc,pc,r6,LSL #2
62 ADD r12,r0,r1,LSL #1
67 ADD r12,r2,r3,LSL #1
81 ADD r12,r2,r3,LSL #1
91 ADD r12,r2,r3,LSL #1
105 ADD r12,r2,r3,LSL #1
113 SUB r0,r0,r1,LSL #1
119 ADD r12,r2,r3,LSL #1
128 SUB r0,r0,r1,LSL #
    [all...]
armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S 51 ORR r11,r10,r11,LSL #8
52 ORR r10,r4,r5,LSL #8
83 ORR r11,r10,r11,LSL #8
84 ORR r10,r4,r5,LSL #8
85 PKHBT r4,r10,r11,LSL #16
105 ORR r11,r10,r11,LSL #8
106 ORR r10,r4,r5,LSL #8
107 PKHBT r4,r10,r11,LSL #16
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/vc/m4p10/src/
armVCM4P10_Interpolate_Chroma_s.s 135 ADD dxEightMinusdx, EightMinusdx, dx, LSL #16
136 ORR iWidth, iWidth, temp, LSL #16
167 ORR x01x00, x00, x01, LSL #16
169 ORR x02x01, x01, x02, LSL #16
171 ORR x11x10, x10, x11, LSL #16
172 ORR x12x11, x11, x12, LSL #16
187 RSB pSrc2, pSrc, pSrc1, LSL #1
194 ORR OutRow0100, OutRow00, OutRow01, LSL #8
204 ORR x21x20, x20, x21, LSL #16
205 ORR x22x21, x21, x22, LSL #16
    [all...]
omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s 97 ORR rowLuma01,rowLuma01,temp3,LSL #16 ;// rowLuma01 = [0b|0a]
106 ;// So we can shift the packed rowLuma values [0b|0a] with a single LSL operation
110 LSL rowLuma01,rowLuma01,shift
111 LSL rowLuma23,rowLuma23,shift
139 PKHBT SrcDst00,SrcDst00,temp1,LSL #16 ;// Pack the first two product values
146 PKHBT SrcDst02,SrcDst02,temp2,LSL #16 ;// Pack the next two product values
150 PKHBT SrcDst10,SrcDst10,temp1,LSL #16 ;// Pack the next two product values
158 PKHBT SrcDst12,SrcDst12,temp2,LSL #16 ;// Pack the next two product values
162 PKHBT SrcDst20,SrcDst20,temp1,LSL #16 ;// Pack the next two product values
174 PKHBT SrcDst22,SrcDst22,temp2,LSL #16 ;// Pack the remaining product value
    [all...]
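
Note: the comment at line 106 of omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s states the trick explicitly: two 16-bit coefficients are packed into one register (ORR/PKHBT with LSL #16) so that a single LSL scales both halves at once, which only works while the low half has enough headroom not to spill into the high half. A hedged C sketch of that SIMD-within-a-register idea (function names are illustrative):

    #include <stdint.h>

    /* Pack two 16-bit values as [b|a] (PKHBT/ORR ..., LSL #16). */
    static uint32_t pack16(uint16_t a, uint16_t b)
    {
        return (uint32_t)a | ((uint32_t)b << 16);
    }

    /* One LSL scales both halves, provided (a << shift) still fits in
     * 16 bits; otherwise the low half would corrupt the high half. */
    static uint32_t shift_both(uint32_t packed, unsigned shift)
    {
        return packed << shift;
    }
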
armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.s 121 SUB pSrc, pSrc, srcStep, LSL #2
131 RSB ValCD0, ValEB0, ValCD0, LSL #2 ;// 4*(Off+C+D) - (Off+B+E)
133 LDR ValD, [pSrc, srcStep, LSL #1] ;// Load [d3 d2 d1 d0]
135 RSB ValCD1, ValEB1, ValCD1, LSL #2
140 LDR ValF, [pSrc, srcStep, LSL #2] ;// Load [f3 f2 f1 f0]
142 ADD ValCD0, ValCD0, ValCD0, LSL #2 ;// 5 * [4*(Off+C+D) - (Off+B+E)]
143 ADD ValCD1, ValCD1, ValCD1, LSL #2
146 RSB ValED1, ValCF1, ValED1, LSL #2
148 SUB ValA, pSrc, srcStep, LSL #1
150 RSB ValED0, ValCF0, ValED0, LSL #2 ;// 4*(Off+E+D) - (Off+C+F)
    [all...]
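
Note: the half-pel interpolation comments show how part of the H.264 6-tap filter is built from shifts alone: t = 4*(C+D) - (B+E) via RSB ..., LSL #2, then t += 4*t via ADD ..., LSL #2, giving 20*(C+D) - 5*(B+E). A hedged scalar C restatement of that step, ignoring the rounding offset and the packed SIMD layout used in the file:

    #include <stdint.h>

    /* 20*(c + d) - 5*(b + e), computed the way the assembly does it:
     *   RSB t, eb, cd, LSL #2   ; t = 4*cd - eb
     *   ADD t, t, t, LSL #2     ; t = 5*t = 20*cd - 5*eb            */
    static int32_t six_tap_core(int32_t b, int32_t c, int32_t d, int32_t e)
    {
        int32_t cd = c + d;
        int32_t eb = e + b;
        int32_t t  = (cd << 2) - eb;   /* 4*(C+D) - (B+E)       */
        return t + (t << 2);           /* 5 * [4*(C+D) - (B+E)] */
    }
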
armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s 110 PKHBT ValB, ValA, ValD, LSL #16 ;// [b1 a1 b0 a0]
114 PKHBT ValI, Temp1, Temp2, LSL #16 ;// [00 i1 00 i0]
115 PKHBT ValF, ValE, ValH, LSL #16 ;// [f1 e1 f0 e0]
123 RSB Temp1, Temp3, Temp1, LSL #2
125 ADD Temp1, Temp1, Temp1, LSL #2
132 RSB Temp1, Temp3, Temp1, LSL #2
134 ADD Temp1, Temp1, Temp1, LSL #2
153 ORR Acc0, Acc0, Acc1, LSL #8
154 RSB Temp5, Temp1, Temp2, LSL #2
156 ADD Temp5, Temp5, Temp5, LSL #2
    [all...]
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s 134 PKHBT ValB, ValA, ValD, LSL #16 ;// [b1 a1 b0 a0]
138 PKHBT ValI, Temp1, Temp2, LSL #16 ;// [00 i1 00 i0]
139 PKHBT ValF, ValE, ValH, LSL #16 ;// [f1 e1 f0 e0]
147 RSB Temp1, Temp3, Temp1, LSL #2
149 ADD Temp1, Temp1, Temp1, LSL #2
156 RSB Temp1, Temp3, Temp1, LSL #2
158 ADD Temp1, Temp1, Temp1, LSL #2
171 RSB Temp1, Temp1, Temp2, LSL #2
173 ADD Temp1, Temp1, Temp1, LSL #2
179 RSB Temp1, Temp1, Temp2, LSL #
    [all...]
armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.s 139 ADD Counter, Temp, Counter, LSL #8 ;// [0 0 H W]
150 SUB pSrc, pSrc, srcStep, LSL #2
159 RSB ValCD0, ValEB0, ValCD0, LSL #2 ;// 4*(Off+C+D) - (Off+B+E)
161 LDR ValD, [pSrc, srcStep, LSL #1] ;// Load [d3 d2 d1 d0]
163 RSB ValCD1, ValEB1, ValCD1, LSL #2
167 LDR ValF, [pSrc, srcStep, LSL #2] ;// Load [f3 f2 f1 f0]
169 ADD ValCD0, ValCD0, ValCD0, LSL #2 ;// 5 * [4*(Off+C+D) - (Off+B+E)]
170 ADD ValCD1, ValCD1, ValCD1, LSL #2
173 RSB ValED1, ValCF1, ValED1, LSL #2
175 SUB ValA, pSrc, srcStep, LSL #
    [all...]
omxVCM4P10_TransformDequantLumaDCFromPair_s.s 196 PKHBT trRow00,in00,in10,LSL #16 ;// [1 0] = [f4:f0]
206 PKHBT trRow20,in02,in12,LSL #16 ;// [9 8] = [6 2]
214 PKHBT trRow02,in20,in30,LSL #16 ;// [3 2] = [f12:f8]
227 PKHBT trRow22,in22,in32,LSL #16 ;// [11 10] = [14 10]
273 PKHBT trCol00,rowOp00,rowOp10,LSL #16 ;// [1 0] = [f4:f0]
283 PKHBT trCol20,rowOp02,rowOp12,LSL #16 ;// [9 8] = [6 2]
291 PKHBT trCol02,rowOp20,rowOp30,LSL #16 ;// [3 2] = [f12:f8]
304 PKHBT trCol22,rowOp22,rowOp32,LSL #16 ;// [11 10] = [14 10]
361 LSL Scale, Scale, Shift ;// Scale = Scale << Shift
372 PKHBT out00, temp1, temp2, LSL #14 ;// c0w0 = | Temp2 | Temp1
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/
h264bsd_interpolate_ver_half.s 138 ADD count, partW, partH, LSL #16 ;// |partH|partW|
147 ADD count, count, tmp1, LSL #24 ;// partWidth-1 to top byte
161 ADD tmpa, tmpa, tmpa, LSL #2 ;// 5(G+M)
164 ADD tmpa, plus16, tmpa, LSL #2 ;// 16+20(G+M)
168 ADD tmpb, tmpb, tmpb, LSL #2 ;// 5(C+R)
177 ADD tmpa, tmpa, tmpa, LSL #2 ;// 5(G+M)
179 ADD tmpa, plus16, tmpa, LSL #2 ;// 16+20(G+M)
184 ADD tmpb, tmpb, tmpb, LSL #2 ;// 5(C+R)
191 AND tmpa, tmpa, tmpb, LSL #3 ;// mask and divede by 32
206 ADD tmpa, tmpa, tmpa, LSL #2 ;// 5(G+M
    [all...]
h264bsd_interpolate_chroma_hor_ver.s 164 ADD count, count, tmp2, LSL #16 ;// chromaPartHeight-1
165 ADD count, count, tmp2, LSL #24 ;// loop_y
166 ADD count, count, tmp1, LSL #20 ;// chromaPartWidth-1
168 PKHBT valY, valY, yFrac, LSL #16 ;// |yFrac|valY |
182 LDRB tmp5, [ptrA, width, LSL #1]
184 PKHBT tmp1, tmp1, tmp3, LSL #16 ;// |t3|t1|
185 PKHBT tmp3, tmp3, tmp5, LSL #16 ;// |t5|t3|
190 ADD count, count, tmp2, LSL #8
195 LDRB tmp6, [ptrA, width, LSL #1]
197 PKHBT tmp2, tmp2, tmp4, LSL #16 ;// |t4|t2
    [all...]
h264bsd_interpolate_ver_quarter.s 137 ADD count, partW, partH, LSL #8 ;// |xx|xx|partH|partW|
139 RSB count, tmp5, count, LSL #8 ;// |xx|partH-1|partW-1|xx|
148 ADD count, count, tmp1, LSL #16 ;// partWidth-1 to top byte
162 ADD tmpa, tmpa, tmpa, LSL #2 ;// 5(G+M)
165 ADD tmpa, plus16, tmpa, LSL #2 ;// 16+20(G+M)
169 ADD tmpb, tmpb, tmpb, LSL #2 ;// 5(C+R)
178 ADD tmpa, tmpa, tmpa, LSL #2 ;// 5(G+M)
180 ADD tmpa, plus16, tmpa, LSL #2 ;// 16+20(G+M)
185 ADD tmpb, tmpb, tmpb, LSL #2 ;// 5(C+R)
189 MOVS tmp1, count, LSL #31 ;// update flags (verOffset
    [all...]
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/
omxVCM4P10_InterpolateLuma_s.s 196 ADD index, idx, idy, LSL #2 ;// [index] = [idy][idx]
209 ADD pc, pc, index, LSL #2
233 ADD Temp, pSrc, srcStep, LSL #1
239 ADD Temp, pDst, dstStep, LSL #1
256 ADD Temp, pDst, dstStep, LSL #1
269 ADD Temp, pDst, dstStep, LSL #1
286 ADD Temp, pDst, dstStep, LSL #1
297 SUB pSrc, pSrc, srcStep, LSL #1
303 ADD Temp, pDst, dstStep, LSL #1
315 SUB pSrc, pSrc, srcStep, LSL #
    [all...]
