/external/llvm/test/Bitcode/

metadata-2.ll
    31: %tmp1 = lshr i32 %x_arg, 1 ; <i32> [#uses=1]
    34: %tmp6 = lshr i32 %tmp4, 2 ; <i32> [#uses=1]
    38: %tmp12 = lshr i32 %tmp10, 4 ; <i32> [#uses=1]
    41: %tmp18 = lshr i32 %tmp16, 8 ; <i32> [#uses=1]
    44: %tmp24 = lshr i32 %tmp22, 16 ; <i32> [#uses=1]
    52: %tmp1 = lshr i32 %x_arg, 1 ; <i32> [#uses=1]
    57: %tmp8 = lshr i32 %tmp6, 2 ; <i32> [#uses=1]
    62: %tmp15 = lshr i32 %tmp13, 4 ; <i32> [#uses=1]
    67: %tmp22 = lshr i32 %tmp20, 8 ; <i32> [#uses=1]
    72: %tmp29 = lshr i32 %tmp27, 16 ; <i32> [#uses=1 [all...]
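For reference: lshr is LLVM IR's logical shift right. It fills the vacated high bits with zeros (ashr, by contrast, replicates the sign bit), it works on integers and element-wise on integer vectors, and a shift amount of at least the bit width gives an undefined result. A minimal standalone example, not taken from any file in this listing:

    define i32 @lshr_example(i32 %x) {
      %r = lshr i32 %x, 4   ; e.g. 0x80000000 -> 0x08000000 (ashr would give 0xF8000000)
      ret i32 %r
    }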
/external/llvm/test/CodeGen/ARM/

2013-04-18-load-overlap-PR14824.ll
    16: %data.i.i677.48.extract.shift = lshr i512 %s122, 384
    19: %data.i.i677.32.extract.shift = lshr i512 %s122, 256
    22: %data.i.i677.16.extract.shift = lshr i512 %s122, 128
    25: %data.i.i677.56.extract.shift = lshr i512 %s122, 448
    28: %data.i.i677.24.extract.shift = lshr i512 %s122, 192
    40: %data.i1.i676.48.extract.shift = lshr i512 %s131, 384
    43: %data.i1.i676.32.extract.shift = lshr i512 %s131, 256
    46: %data.i1.i676.16.extract.shift = lshr i512 %s131, 128
    49: %data.i1.i676.56.extract.shift = lshr i512 %s131, 448
    52: %data.i1.i676.24.extract.shift = lshr i512 %s131, 19 [all...]
/external/llvm/test/CodeGen/X86/

avx512-shift.ll
    10: %b = lshr <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    22: %b = lshr <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
    34: %b = lshr <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
    60: %k = lshr <16 x i32> %x, %y
    68: %k = lshr <8 x i64> %x, %y
    126: %k = lshr <16 x i32> %x, %y1
    135: %k = lshr <8 x i64> %x, %y1

selectiondag-cse.ll
    33: %tmp7 = lshr i208 %tmp, 80
    40: %tmp14 = lshr i32 %tmp13, 2
    43: %tmp16 = lshr i208 %tmp, 96
    49: %tmp22 = lshr i32 %tmp21, 2
    54: %tmp26 = lshr i32 %tmp25, 2
    57: %tmp28 = lshr i208 %tmp, 80
    62: %tmp33 = lshr i32 %tmp32, 2

code_placement.ll
    24: %3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
    28: %7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
    39: %17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
    53: %30 = lshr i32 %16, 24 ; <i32> [#uses=1]
    64: %36 = lshr i32 %29, 16 ; <i32> [#uses=1]
    72: %44 = lshr i32 %29, 24 ; <i32> [#uses=1]
    95: %59 = lshr i32 %29, 16 ; <i32> [#uses=1]
    104: %68 = lshr i32 %29, 8 ; <i32> [#uses=1]
    120: %83 = lshr i32 %67, 24 ; <i32> [#uses=1]
    123: %85 = lshr i32 %67, 16 ; <i32> [#uses=1 [all...]

h-register-store.ll
    40: %q = lshr i16 %p, 8
    46: %q = lshr i32 %p, 8
    52: %q = lshr i64 %p, 8

pr15296.ll
    13: %bitop = lshr <8 x i32> %input, %smear.7
    26: %bitop = lshr <8 x i32> %input, %smear.7
    39: %bitop = lshr <4 x i64> %input, %smear.7

shift-and.ll
    52: %res = lshr i64 %val, %shamt
    61: %res = lshr i64 %val, %shamt
    73: %shr = lshr i64 %key, 3

shift-double.ll
    18: %Y = lshr i64 %X, %shift.upgrd.3 ; <i64> [#uses=1]
    27: %Y = lshr i32 %B, %shift.upgrd.5 ; <i32> [#uses=1]
    37: %Y = lshr i16 %B, %shift.upgrd.7 ; <i16> [#uses=1]

shift-folding.ll
    10: %Y = lshr i32 %X, 2
    48: %tmp512 = lshr i32 %tmp4, 24
    63: %index = lshr i32 %i.zext, 11

sse2-vector-shifts.ll
    160: %shl = lshr <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
    170: %shl = lshr <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
    180: %shl = lshr <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
    189: %shl = lshr <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
    199: %shl = lshr <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
    209: %shl = lshr <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
    218: %shl = lshr <2 x i64> %InVec, <i64 0, i64 0>
    228: %shl = lshr <2 x i64> %InVec, <i64 1, i64 1>
    238: %shl = lshr <2 x i64> %InVec, <i64 63, i64 63>
    257: %srl0 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2 [all...]
/external/llvm/test/CodeGen/AMDGPU/

rotr.ll
    13: %tmp2 = lshr i32 %x, %y
    29: %tmp2 = lshr <2 x i32> %x, %y
    49: %tmp2 = lshr <4 x i32> %x, %y

sampler-resource-id.ll
    6: ; EG-NEXT: LSHR
    18: ; EG-NEXT: LSHR
    30: ; EG-NEXT: LSHR

mul_uint24.ll
    13: %a_24 = lshr i32 %0, 8
    15: %b_24 = lshr i32 %1, 8
    63: %a_24 = lshr i64 %tmp0, 40
    65: %b_24 = lshr i64 %tmp1, 40

rotr.i64.ll
    13: %tmp2 = lshr i64 %x, %y
    33: %tmp2 = lshr i64 %x, %y
    44: %tmp2 = lshr <2 x i64> %x, %y
    57: %tmp2 = lshr <2 x i64> %x, %y
/external/llvm/test/CodeGen/Mips/

mips64extins.ll
    6: %shr = lshr i64 %i, 5
    14: %shr = lshr i64 %i, 5
    22: %shr = lshr i64 %i, 34
/external/llvm/test/CodeGen/Thumb/

rev.ll
    6: %tmp1 = lshr i32 %X, 8
    22: %tmp1 = lshr i32 %X, 8
    49: %shr9 = lshr i16 %a, 8
/external/llvm/test/CodeGen/Thumb2/

thumb2-uxt_rot.ll
    29: %B.u = lshr i32 %A.u, 8
    40: %X.hi = lshr i32 %X, 16
    51: %X.hi = lshr i32 %X, 8
/external/llvm/test/MC/Mips/

mips64extins.ll
    8: %shr = lshr i64 %i, 5
    16: %shr = lshr i64 %i, 34
    24: %shr = lshr i64 %i, 5
/external/llvm/test/Transforms/InstCombine/

lshr-phi.ll
    2: ; RUN: not grep lshr %t
    5: ; Instcombine should be able to eliminate the lshr, because only
    20: %t3 = lshr i32 %k.04, 14 ; <i32> [#uses=1]
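The truncated comment at line 5 of lshr-phi.ll points at InstCombine's demanded-/known-bits reasoning: when every bit that survives the shift is already known to be zero, the lshr folds to a constant and disappears. A hand-written sketch of that situation, with illustrative names of our own (not the test body itself):

    define i32 @lshr_known_zero(i32 %x) {
      %lo = and i32 %x, 16383   ; 0x3fff: bits 14..31 are known zero
      %s = lshr i32 %lo, 14     ; only known-zero bits reach the result
      ret i32 %s                ; instcombine folds %s to 0
    }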
/external/llvm/test/Transforms/SLPVectorizer/X86/

pr18060.ll
    10: %extract = lshr i64 %Value, 12
    24: %shr = lshr i64 %Value, 16
    26: %extract7 = lshr i64 %Value, 28
/external/llvm/test/CodeGen/SystemZ/

risbg-01.ll
    11: %shr = lshr i32 %foo, 10
    21: %shr = lshr i64 %foo, 10
    31: %shr = lshr i32 %foo, 22
    41: %shr = lshr i64 %foo, 22
    52: %shr = lshr i32 %foo, 2
    62: %shr = lshr i64 %foo, 2
    74: %shr = lshr i32 %foo, 2
    85: %shr = lshr i64 %foo, 2
    135: ; This is equivalent to the lshr case, because the bits from the
    142: %partb = lshr i32 %foo, 1 [all...]
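The cut-off comment at line 135 of risbg-01.ll describes a standard equivalence: once a mask discards every bit that an arithmetic shift copies from the sign bit, ashr and lshr compute the same value, so the same RISBG (rotate then insert selected bits) pattern applies to both. A hedged illustration with names of our own, not the test's code:

    define i32 @ashr_like_lshr(i32 %x) {
      %shr = ashr i32 %x, 2    ; bits 30..31 are copies of the sign bit
      %and = and i32 %shr, 255 ; ...but the mask keeps only bits 0..7
      ret i32 %and             ; identical to (lshr i32 %x, 2) & 255
    }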
/dalvik/dx/tests/101-verify-wide-math/

op_lshr.j
    24: lshr
/external/llvm/test/Analysis/ValueTracking/

knownzero-shift.ll
    8: %A = lshr i8 %2, 1 ; We should know that %A is nonzero.
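The comment in knownzero-shift.ll exercises ValueTracking's known-bits analysis: if some bit above position 0 of the shifted value is known to be one, the result of an lshr by 1 is known nonzero. A self-contained sketch of that reasoning (our own example, not the code surrounding line 8 of the test):

    define i1 @shifted_nonzero(i8 %x) {
      %set = or i8 %x, 2          ; bit 1 is now known to be one
      %shr = lshr i8 %set, 1      ; that bit lands in position 0
      %cmp = icmp ne i8 %shr, 0   ; ValueTracking folds this to true
      ret i1 %cmp
    }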
/external/llvm/test/CodeGen/AArch64/

rotate.ll
    10: %2 = lshr <2 x i64> %1, <i64 8, i64 8>