/external/llvm/test/CodeGen/Thumb2/

thumb2-orr.ll
   21: %tmp = lshr i32 %b, 6
   38: %r8 = lshr i32 %a, 8

thumb2-sub4.ll
   21: %tmp = lshr i32 %b, 6
   38: %r8 = lshr i32 %a, 8

thumb2-pack.ll
   54: %tmp4 = lshr i32 %tmp2, 16 ; <i32> [#uses=2]
   63: %tmp37 = lshr i32 %Y, 16 ; <i32> [#uses=1]
   73: %tmp37 = lshr i32 %Y, 12 ; <i32> [#uses=1]
   97: %tmp3 = lshr i32 %Y, 22
  107: %tmp2 = lshr i32 %src2, 16
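The thumb2-orr.ll and thumb2-sub4.ll hits above exercise Thumb-2's shifted-operand folding: an lshr feeding a bitwise or arithmetic instruction is absorbed into the consumer's shifted-register form instead of being emitted as a separate shift. A minimal sketch of the pattern (function name and the exact assembly in the comment are illustrative, not from the tests):

    define i32 @orr_shifted(i32 %a, i32 %b) {
      %tmp = lshr i32 %b, 6
      %res = or i32 %a, %tmp    ; expected to fold into one instruction, e.g. orr.w r0, r0, r1, lsr #6
      ret i32 %res
    }
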
/external/llvm/test/CodeGen/X86/

2009-05-30-ISelBug.ll
   18: %4 = lshr i32 %3, 8 ; <i32> [#uses=1]
   25: %11 = lshr i32 %9, 8 ; <i32> [#uses=1]

phys_subreg_coalesce.ll
   15: %6 = lshr i64 %p1.0, 32 ; <i64> [#uses=1]
   18: %9 = lshr i64 %p2.0, 32 ; <i64> [#uses=1]

bswap.ll
   53: %and = lshr i32 %a, 8
   70: %and = lshr i32 %a, 8
   97: %hishifted = lshr i64 %big, 8
  105: ; This time, the lshr (and subsequent or) is completely useless. While it's
  121: %hishifted = lshr i64 %big, 8
  146: %hishifted = lshr i64 %big, 8
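The bswap.ll hits above are pieces of shift-and-or byte-swap idioms that the X86 backend is expected to recognize as a single swap (or to discard entirely when, as the quoted comment says, the lshr contributes nothing). A minimal sketch of the idiom with illustrative names:

    define i32 @swap_low_halfword(i32 %a) {
      ; ((a >> 8) & 0xff) | ((a << 8) & 0xff00): byte-swap of the low 16 bits
      %hi = lshr i32 %a, 8
      %hi.b = and i32 %hi, 255
      %lo = shl i32 %a, 8
      %lo.b = and i32 %lo, 65280
      %r = or i32 %hi.b, %lo.b   ; matchable as a 16-bit byte swap (e.g. a rotate by 8)
      ret i32 %r
    }
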
h-registers-0.ll
   22: %t0 = lshr i64 %x, 8
   40: %t0 = lshr i32 %x, 8
   58: %t0 = lshr i16 %x, 8
   75: %t0 = lshr i64 %x, 8
   90: %t0 = lshr i32 %x, 8
  105: %t0 = lshr i16 %x, 8
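h-registers-0.ll repeatedly shifts right by 8 and then uses only the low byte of the result; on x86 that second byte is directly addressable as an "h" register (AH, BH, ...), so no shift instruction is needed at all. A simplified sketch of the shape being tested (names illustrative):

    define i8 @second_byte(i32 %x) {
      %t0 = lshr i32 %x, 8
      %t1 = trunc i32 %t0 to i8   ; selectable as a move out of AH rather than shr+mov
      ret i8 %t1
    }
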
avx2-shift.ll
   35: %k = lshr <4 x i32> %x, %y
   42: %k = lshr <8 x i32> %x, %y
   49: %k = lshr <2 x i64> %x, %y
   56: %k = lshr <4 x i64> %x, %y
   98: %s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32
  105: %s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  111: %s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
  184: %k = lshr <4 x i32> %x, %y1
  192: %k = lshr <8 x i32> %x, %y1
  200: %k = lshr <2 x i64> %x, %y [all...]

2008-04-28-CoalescerBug.ll
   37: %tmp13116 = lshr i64 %tmp13111, 16 ; <i64> [#uses=1]
   40: %tmp13120 = lshr i64 %tmp13111, 32 ; <i64> [#uses=1]
   43: %tmp13124 = lshr i64 %tmp13111, 48 ; <i64> [#uses=1]
   56: %tmp13188 = lshr i32 %tmp13172, 16 ; <i32> [#uses=1]
   57: %tmp13190 = lshr i32 %tmp13177, 16 ; <i32> [#uses=1]
   58: %tmp13192 = lshr i32 %tmp13182, 16 ; <i32> [#uses=1]
   83: %tmp13296 = lshr i64 %tmp13225, 16 ; <i64> [#uses=1]
   85: %tmp13299 = lshr i64 %tmp13225, 32 ; <i64> [#uses=1]
   87: %tmp13302 = lshr i64 %tmp13225, 48 ; <i64> [#uses=1]

avx2-vector-shifts.ll
  171: %shl = lshr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
  181: %shl = lshr <16 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  191: %shl = lshr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  201: %shl = lshr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
  211: %shl = lshr <8 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  221: %shl = lshr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  231: %shl = lshr <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
  241: %shl = lshr <4 x i64> %InVec, <i64 1, i64 1, i64 1, i64 1>
  251: %shl = lshr <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
  266: %sra = lshr <4 x i32> %x, %trun [all...]
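The avx2-shift.ll and avx2-vector-shifts.ll hits split into two families: lshr by a splat constant, which maps onto the immediate shift forms (vpsrlw/vpsrld/vpsrlq), and lshr by a per-element variable amount, which AVX2 only supports natively for 32- and 64-bit lanes (vpsrlvd/vpsrlvq); i16 lanes need a more elaborate lowering. A sketch of the variable case (function name illustrative):

    define <4 x i32> @var_shift(<4 x i32> %x, <4 x i32> %y) {
      %k = lshr <4 x i32> %x, %y   ; per-element shift: a single vpsrlvd under AVX2
      ret <4 x i32> %k
    }
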
/external/llvm/test/Instrumentation/AddressSanitizer/

freebsd.ll
   20: ; CHECK-32: lshr {{.*}} 3
   27: ; CHECK-64: lshr {{.*}} 3
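The AddressSanitizer test checks the shadow-address computation, which is why a bare lshr by 3 is all it matches: ASan maps every 8 bytes of application memory to one shadow byte, so the instrumented address is (addr >> 3) + Offset, and the platform-specific Offset is what freebsd.ll pins down. A sketch of the instrumentation shape, with an illustrative offset constant (not FreeBSD's):

    define i8 @shadow_byte(i8* %p) {
      %addr = ptrtoint i8* %p to i64
      %scaled = lshr i64 %addr, 3              ; one shadow byte covers 8 application bytes
      %shadow.i = add i64 %scaled, 2147450880  ; + platform shadow offset (illustrative value)
      %shadow.p = inttoptr i64 %shadow.i to i8*
      %s = load i8, i8* %shadow.p              ; nonzero means the access needs a slow-path check
      ret i8 %s
    }
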
/external/llvm/test/MC/Mips/

mips64shift.ll
   24: %shr = lshr i64 %a0, 10
   45: %shr = lshr i64 %a0, 40
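mips64shift.ll pairs a small and a large shift amount deliberately: MIPS64 encodes only a 5-bit shift field, so an lshr i64 by 0-31 selects dsrl while 32-63 selects dsrl32, which adds an implicit 32 to the encoded amount. The expected selections, hedged in the comments:

    %shr1 = lshr i64 %a0, 10   ; dsrl  $d, $s, 10
    %shr2 = lshr i64 %a0, 40   ; dsrl32 $d, $s, 8  (8 + implicit 32)
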
/external/llvm/test/Transforms/InstCombine/

trunc.ll
   56: %C = lshr i128 %B, 16
   60: ; CHECK: %C = lshr i32 %A, 16
   66: %C = lshr i128 %B, 32
   70: ; CHECK: %C = lshr i64 %A, 32
   77: %C = lshr i128 %B, 32
   82: ; CHECK: %C = lshr i92 %B, 32
  128: %shr = lshr i128 %bc, 32
  141: %shr = lshr i128 %bc, 64
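The trunc.ll pairs show InstCombine narrowing a shift through zext/trunc: when every bit the shift touches comes from the original narrow value, the i128 lshr can be rewritten in the source width, which is what the CHECK lines record. A worked instance (value names follow the hits; the function wrapper is illustrative):

    define i32 @narrow(i32 %A) {
      %B = zext i32 %A to i128
      %C = lshr i128 %B, 16    ; only bits of %A are ever shifted out
      %D = trunc i128 %C to i32
      ret i32 %D
      ; after instcombine: %C = lshr i32 %A, 16, and the zext/trunc pair folds away
    }
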
/external/llvm/test/CodeGen/PowerPC/

bperm.ll
   43: %0 = lshr i64 %i1, 8
   57: %0 = lshr i64 %i1, 6
   88: %0 = lshr i64 %i1, 15
  116: %and = lshr i32 %x, 16
  132: %0 = lshr i64 %i0, 5
  145: %0 = lshr i64 %i0, 1
  160: %0 = lshr i64 %i1, 14
  230: %shr = lshr i64 %x, 28
  244: %shr = lshr i64 %x, 28
  262: %shr = lshr i64 %x, 2 [all...]
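bperm.ll feeds these lshr results into PowerPC's rotate-and-mask bit-permutation matcher: a 64-bit logical right shift by n is just a left rotate by 64-n with the top n bits cleared, i.e. a single rldicl. A sketch (the assembly in the comment is my expectation, hedged):

    define i64 @shr28(i64 %x) {
      %shr = lshr i64 %x, 28   ; e.g. rldicl r3, r3, 36, 28: rotate left 64-28, clear the 28 high bits
      ret i64 %shr
    }
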
2009-09-18-carrybit.ll
   21: %tmp1 = lshr i64 %r.0273, 31 ; <i64> [#uses=1]
   25: %tmp106 = lshr i32 %tmp213, 31 ; <i32> [#uses=1]
   30: %tmp109 = lshr i64 %q.0272, 63 ; <i64> [#uses=1]
   35: %tmp112 = lshr i64 %q.0272, 31 ; <i64> [#uses=1]
   39: %tmp115 = lshr i32 %tmp158, 31 ; <i32> [#uses=1]
/external/llvm/test/CodeGen/AArch64/

addsub-shifted.ll
   78: %shift1 = lshr i32 %rhs32, 18
   83: %shift2 = lshr i32 %rhs32, 31
   88: %shift3 = lshr i32 %rhs32, 5
   94: %shift4 = lshr i32 %rhs32, 19
   99: %shift4a = lshr i32 %lhs32, 15
  104: %shift5 = lshr i64 %rhs64, 18
  109: %shift6 = lshr i64 %rhs64, 31
  114: %shift7 = lshr i64 %rhs64, 5
  120: %shift8 = lshr i64 %rhs64, 19
  125: %shift8a = lshr i64 %lhs64, 4 [all...]
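addsub-shifted.ll checks that AArch64 folds these lshr results into the shifted-register forms of add/sub instead of materializing a separate shift. A sketch (names and the exact assembly comment are illustrative):

    define i32 @add_shifted(i32 %lhs32, i32 %rhs32) {
      %shift1 = lshr i32 %rhs32, 18
      %val = add i32 %lhs32, %shift1   ; expected: add w0, w0, w1, lsr #18
      ret i32 %val
    }
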
fast-isel-shift.ll
   56: %2 = lshr i16 %1, 1
   66: %2 = lshr i16 %1, 1
   74: %2 = lshr i32 %1, 1
   83: %2 = lshr i32 %1, 1
   91: %2 = lshr i64 %1, 1
  305: %1 = lshr i8 %a, %b
  312: %1 = lshr i8 %a, 4
  320: %2 = lshr i16 %1, 4
  329: %2 = lshr i16 %1, 4
  337: %2 = lshr i32 %1, [all...]
arm64-long-shift.ll
   46: define i128 @lshr(i128 %r, i128 %s) nounwind readnone {
   47: ; CHECK-LABEL: lshr:
   63: %shr = lshr i128 %r, %s
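The @lshr function in arm64-long-shift.ll covers the expensive case: an i128 shifted by a variable amount must be split across two 64-bit registers. A sketch of the low-half computation, assuming 0 <= %s < 128; the shift is split into two steps so every individual shift amount stays below 64 and nothing is undefined at %s == 0 (all names are illustrative, not from the test):

    define i64 @lo_of_lshr_i128(i64 %lo, i64 %hi, i64 %s) {
      %ge64 = icmp uge i64 %s, 64
      %s.hi = sub i64 %s, 64
      %from.hi = lshr i64 %hi, %s.hi   ; result when %s >= 64
      %inv = sub i64 63, %s
      %hi.x2 = shl i64 %hi, 1
      %carry = shl i64 %hi.x2, %inv    ; hi << (64 - s), done in two defined steps
      %lo.sh = lshr i64 %lo, %s
      %low = or i64 %lo.sh, %carry     ; result when %s < 64
      %res = select i1 %ge64, i64 %from.hi, i64 %low
      ret i64 %res
    }
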
/external/llvm/test/CodeGen/AMDGPU/

shl.ll
   58: ;EG: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
   60: ;EG-DAG: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
   86: ;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
   87: ;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
   88: ;EG-DAG: LSHR {{.*}}, 1
   89: ;EG-DAG: LSHR {{.*}}, 1
  127: ;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
  128: ;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
  129: ;EG-DAG: LSHR {{\*? *}}[[COMPSHC]]
  130: ;EG-DAG: LSHR {{\*? *}}[[COMPSHD] [all...]
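The Evergreen checks in shl.ll describe the same long-shift problem on a GPU without native 64-bit shifts: the bits that cross from the low into the high word are produced by an LSHR of (31 - s) followed by an LSHR of 1, so the combined (32 - s) shift stays defined even when s is 0. A sketch of that two-step trick, as my reading of what the COMPSH/OVERF check variables bind to (an assumption, not confirmed by the test text):

    define i32 @overflow_bits(i32 %lo, i32 %s) {
      ; bits of %lo that cross into the high word of a 64-bit shl by %s, 0 <= %s < 32
      %compsh = sub i32 31, %s
      %tmp = lshr i32 %lo, %compsh   ; lo >> (31 - s)
      %overf = lshr i32 %tmp, 1      ; == lo >> (32 - s), but yields 0 rather than UB at %s == 0
      ret i32 %overf
    }
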
image-resource-id.ll
    8: ; EG-NEXT: LSHR
   22: ; EG-NEXT: LSHR
   38: ; EG-NEXT: LSHR
   52: ; EG-NEXT: LSHR
   68: ; EG-NEXT: LSHR
   83: ; EG-NEXT: LSHR
   98: ; EG-NEXT: LSHR
  113: ; EG-NEXT: LSHR
  130: ; EG-NEXT: LSHR
  145: ; EG-NEXT: LSHR [all...]
/external/llvm/test/CodeGen/Thumb/

2009-08-12-ConstIslandAssert.ll
   16: %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
   19: %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
   37: %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
   40: %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
   46: %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
   61: %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
   64: %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
   70: %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
   84: %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
   87: %77 = lshr i32 %72, 16 ; <i32> [#uses=1 [all...]
/external/llvm/test/CodeGen/ARM/

pack.ll
   45: %tmp4 = lshr i32 %tmp2, 16
   54: %tmp37 = lshr i32 %Y, 16
   64: %tmp37 = lshr i32 %Y, 12
   88: %tmp3 = lshr i32 %Y, 22
   98: %tmp2 = lshr i32 %src2, 16
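pack.ll (and its Thumb2 twin, thumb2-pack.ll above) feeds these shift-by-16 hits into the PKHBT/PKHTB halfword-packing matcher: an or that combines the top halfword of one register with a shifted halfword of another collapses to one pack instruction. A sketch (names and the assembly comment are illustrative):

    define i32 @pack_tb(i32 %X, i32 %Y) {
      %hi = and i32 %X, -65536   ; keep the top halfword of %X
      %lo = lshr i32 %Y, 16      ; move the top halfword of %Y down
      %r = or i32 %hi, %lo       ; expected: pkhtb r0, r0, r1, asr #16
      ret i32 %r
    }
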
/external/llvm/test/CodeGen/Hexagon/

brev_st.ll
   23: %shr2 = lshr i32 %conv, 1
   41: %shr1 = lshr i32 %conv, 1
   59: %shr2 = lshr i32 %conv, 1
   77: %shr2 = lshr i32 %conv, 1
   95: %shr2 = lshr i32 %conv, 1
/external/llvm/lib/Transforms/InstCombine/

InstCombineShifts.cpp
   66: /// %F = lshr i128 %E, 64
   90: // lshr iff we know that the bits we would otherwise be shifting in are
  142: case Instruction::LShr: {
  147: // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
  150: // We can always turn lshr(c)+shl(c) -> and(c2).
  155: // We can always turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but it isn't
  238: // We turn shl(c)+lshr(c) -> and(c2) if the input doesn't already hav [all...]
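The InstCombineShifts.cpp comments spell out the constant-shift algebra the pass applies. Two of the always-safe rewrites, as a worked IR example (function and value names illustrative):

    define i32 @shift_folds(i32 %x) {
      ; lshr(c1)+lshr(c2) -> lshr(c1+c2)
      %a = lshr i32 %x, 3
      %b = lshr i32 %a, 4    ; becomes: lshr i32 %x, 7
      ; lshr(c)+shl(c) -> and: the pair just clears the low c bits
      %c = lshr i32 %x, 8
      %d = shl i32 %c, 8     ; becomes: and i32 %x, -256
      %r = xor i32 %b, %d
      ret i32 %r
    }
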
/external/clang/test/CodeGen/

x86_64-xsave.c
   20: // XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
   28: // XSAVE: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
   36: // XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
   44: // XSAVE: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
   54: // XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
   62: // XSAVEOPT: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
   72: // XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
   80: // XSAVEC: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
   90: // XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
   98: // XSAVES: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32 [all...]
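Every xsave-family builtin in this test produces the same prologue: the 64-bit feature mask is split so its high half can go in EDX and its low half in EAX, the register pair the XSAVE instructions read. A sketch of the IR shape the CHECK lines above are matching (names illustrative):

    define i32 @mask_high(i64 %mask) {
      %high64 = lshr i64 %mask, 32
      %high32 = trunc i64 %high64 to i32   ; the builtin lowering feeds this to EDX
      ret i32 %high32                      ; the untouched low half of %mask goes to EAX
    }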