/external/llvm/test/Transforms/LoopStrengthReduce/
  nonlinear-postinc.ll
    24: %t22 = lshr i33 %t21, 1

/external/llvm/test/Transforms/SCCP/
  apint-basictest4.ll
    19: %f2 = lshr i133 %f1, 33

/external/llvm/unittests/IR/
  ConstantRangeTest.cpp
    492: TEST_F(ConstantRangeTest, Lshr) {
    493:   EXPECT_EQ(Full.lshr(Full), Full);
    494:   EXPECT_EQ(Full.lshr(Empty), Empty);
    495:   EXPECT_EQ(Full.lshr(One), ConstantRange(APInt(16, 0),
    497:   EXPECT_EQ(Full.lshr(Some), ConstantRange(APInt(16, 0),
    499:   EXPECT_EQ(Full.lshr(Wrap), Full);
    500:   EXPECT_EQ(Empty.lshr(Empty), Empty);
    501:   EXPECT_EQ(Empty.lshr(One), Empty);
    502:   EXPECT_EQ(Empty.lshr(Some), Empty);
    503:   EXPECT_EQ(Empty.lshr(Wrap), Empty) [all...]

/external/llvm/test/CodeGen/AArch64/
  arm64-vsra.ll
    78: %tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    88: %tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
    98: %tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
    109: %tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
    119: %tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
    129: %tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
    139: %tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >

  dp2.ll
    14: %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
    35: %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
    77: %val4_tmp = lshr i32 %val0_tmp, %val2_tmp
    101: %val4_tmp = lshr i32 %val0_tmp, %val1_tmp
    155: %ret = lshr i32 1, %val

  arm64-arith.ll
    71: %lshr = lshr i32 %a, %b
    72: ret i32 %lshr
    80: %lshr = lshr i64 %a, %b
    81: ret i64 %lshr

  bitfield.ll
    90: %shift2 = lshr i32 %val32, 8
    102: %shift5 = lshr i64 %val64, 8
    114: %shift8 = lshr i64 %val64, 63
    118: %shift9 = lshr i32 %val32, 31
    200: %shifted = lshr i32 %fields, 23
    209: %shifted = lshr i64 %fields, 25

  arm64-sli-sri-opt.ll
    27: %vshl_n = lshr <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
    37: %vshl_n = lshr <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>

  arm64-vshr.ll
    45: %shr = lshr <8 x i16> %0, %1
    59: %tmp3 = lshr <1 x i64> %A, < i64 63 >

/external/llvm/test/CodeGen/SystemZ/
  rnsbg-01.ll
    74: %shrb = lshr i32 %b, 10
    86: %shrb = lshr i64 %b, 20
    147: %shrb = lshr i32 %b, 27
    158: %shrb = lshr i64 %b, 59
    172: %shrb = lshr i32 %b, 29
    184: %shrb = lshr i64 %b, 61
    253: %shrorb = lshr i64 %orb, 60

/external/llvm/test/CodeGen/X86/
  2009-11-13-VirtRegRewriterBug.ll
    53: %tmp211 = lshr i256 %mask271.masked.masked.masked.masked.masked.masked.masked, 112 ; <i256> [#uses=0]
    55: %tmp208 = lshr i256 %mask266.masked.masked.masked.masked.masked.masked, 128 ; <i256> [#uses=1]
    60: %tmp193 = lshr i256 %mask241.masked, 208 ; <i256> [#uses=1]
    63: %tmp187 = lshr i256 %ins237, 240 ; <i256> [#uses=1]
    97: %tmp101 = lshr i640 %mask133.masked.masked.masked.masked.masked.masked, 256 ; <i640> [#uses=1]
    116: %54 = lshr i32 %52, undef ; <i32> [#uses=1]
    117: %55 = lshr i32 %53, undef ; <i32> [#uses=1]

/external/llvm/test/Transforms/ConstProp/
  constant-expr.ll
    48: @T1 = global i1 icmp eq (i64 and (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 64) to i64), i64 1), i64 0)
    51: @T2 = global i1* inttoptr (i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 192)), i256 192) to i64), i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 192)), i256 128) to i64)) to i1*)
    54: @T3 = global i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 64) to i64), i64 -1)
    57: @T4 = global i1* inttoptr (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 64) to i64) to i1*)
    60: @T5 = global i1* inttoptr (i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 192) to i64), i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 128) to i64)) to i1*)

/external/llvm/test/Transforms/SLPVectorizer/X86/
  propagate_ir_flags.ll
    11: ; CHECK: lshr exact <4 x i32>
    23: %op1 = lshr exact i32 %load1, 1
    24: %op2 = lshr exact i32 %load2, 1
    25: %op3 = lshr exact i32 %load3, 1
    26: %op4 = lshr exact i32 %load4, 1
    37: ; CHECK: lshr <4 x i32>
    49: %op1 = lshr exact i32 %load1, 1
    50: %op2 = lshr i32 %load2, 1
    51: %op3 = lshr exact i32 %load3, 1
    52: %op4 = lshr exact i32 %load4, [all...]

/external/llvm/test/Transforms/InstSimplify/
  undef.ll
    111: %r = lshr i64 undef, undef
    118: %r = lshr i64 %a, undef
    188: %b = lshr exact i32 undef, %a
    202: %b = lshr i32 0, undef
    265: %b = lshr i32 undef, 0

/external/clang/test/CodeGenCXX/
  bitfield.cpp
    32: // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 50
    41: // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 14
    48: // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 48
    58: // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 16
    65: // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 42
    75: // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 22
    82: // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 40
    92: // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 24
    99: // CHECK-PPC64: %[[shr:.*]] = lshr i64 %[[val]], 10
    109: // CHECK-X86-64: %[[shr:.*]] = lshr i64 %[[val]], 5
    [all...]

/external/llvm/test/Assembler/
  flags.ll
    139: ; CHECK: %z = lshr i64 %x, %y
    140: %z = lshr i64 %x, %y
    145: ; CHECK: %z = lshr exact i64 %x, %y
    146: %z = lshr exact i64 %x, %y
    193: ; CHECK: ret i64 lshr exact (i64 ptrtoint (i64* @addr to i64), i64 9)
    194: ret i64 lshr exact (i64 ptrtoint (i64* @addr to i64), i64 9)

/external/llvm/test/CodeGen/AMDGPU/
  cvt_f32_ubyte.ll
    7: ; SI-NOT: lshr
    20: ; SI-NOT: lshr
    49: ; SI-NOT: lshr
    129: ; SI-NOT: lshr
    139: ; SI-NOT: lshr
    167: %shr = lshr i32 %inreg, 8

  mad_uint24.ll
    13: %a_24 = lshr i32 %0, 8
    15: %b_24 = lshr i32 %1, 8

  predicates.ll
    38: %2 = lshr i32 %in, 1
    97: %4 = lshr i32 %in, 1

/external/llvm/test/CodeGen/ARM/
  2007-04-03-UndefinedSymbol.ll
    38: %ctg23.i = getelementptr i8, i8* %b2.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
    51: %ctg25.i = getelementptr i8, i8* %b4.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
    60: %ctg23.i36 = getelementptr i8, i8* %b2.i35, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
    73: %ctg25.i54 = getelementptr i8, i8* %b4.i53, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
    82: %ctg23.i8 = getelementptr i8, i8* %b2.i7, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
    95: %ctg25.i26 = getelementptr i8, i8* %b4.i25, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]

  long_shift.ll
    15: %tmp2 = lshr i64 %B, 1
    66: %a = lshr i64 %x, %y

  mvn.ll
    31: %tmp1 = lshr i32 %a, 2 ; <i32> [#uses=1]
    39: %tmp2 = lshr i32 %a, %shift.upgrd.2 ; <i32> [#uses=1]

/external/llvm/test/CodeGen/Mips/llvm-ir/
  lshr.ll
    47: %r = lshr i1 %a, %b
    58: %r = lshr i8 %a, %b
    69: %r = lshr i16 %a, %b
    79: %r = lshr i32 %a, %b
    130: %r = lshr i64 %a, %b
    185: %r = lshr i128 %a, %b

/external/llvm/test/CodeGen/Mips/msa/
  shift-dagcombine.ll
    29: %1 = lshr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
    37: %2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,

/external/llvm/test/CodeGen/PowerPC/
  rlwimi.ll
    23: %tmp.3 = lshr i32 %x, 16 ; <i32> [#uses=1]
    32: %tmp.3 = lshr i32 %y, 16 ; <i32> [#uses=1]

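For quick reference, since every hit above is the same LLVM IR instruction: lshr is the logical shift-right, which zero-fills the vacated high bits, and the "exact" keyword seen in flags.ll and propagate_ir_flags.ll makes the result poison if any shifted-out bit was nonzero. A minimal standalone sketch of both forms (the function names are illustrative, not taken from any file listed above):

  define i32 @lshr_plain(i32 %a) {
    ; Logical shift right by 3; the top 3 bits of the result are always 0.
    %r = lshr i32 %a, 3
    ret i32 %r
  }

  define i32 @lshr_exact(i32 %a) {
    ; "exact" asserts no nonzero bits are shifted out: the result is
    ; poison unless %a is a multiple of 8.
    %r = lshr exact i32 %a, 3
    ret i32 %r
  }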