/external/llvm/test/CodeGen/PowerPC/ |
2004-11-29-ShrCrash.ll | 3 %tr1 = lshr i32 1, 0 ; <i32> [#uses=0]
|
fast-isel-shifter.ll | 19 define i32 @lshr() nounwind { 21 ; ELF64: lshr 23 %lshr = lshr i32 -1, 2 24 ret i32 %lshr 31 %lshr = lshr i32 %src1, %src2 32 ret i32 %lshr
|
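A quick worked reading of the constant case above: -1 as i32 is 0xFFFFFFFF, and a logical shift fills with zeros from the left, so lshr i32 -1, 2 yields 0x3FFFFFFF (1073741823), where ashr would have kept -1. A minimal standalone reduction (function name and RUN line are illustrative, not copied from the test):

; RUN: llc < %s -O0 -mtriple=powerpc64-unknown-linux-gnu
define i32 @lshr_const_sketch() nounwind {
entry:
  ; 0xFFFFFFFF >> 2 == 0x3FFFFFFF; zeros shift in from the left
  %lshr = lshr i32 -1, 2
  ret i32 %lshr
}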
shl_elim.ll | 4 %tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1] 6 %tmp410 = lshr i32 %tmp23, 9 ; <i32> [#uses=1]
|
sign_ext_inreg1.ll | 5 %tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1] 7 %tmp410 = lshr i32 %tmp23, 9 ; <i32> [#uses=1]
|
/external/clang/test/CodeGen/ |
builtin_signbit.cpp | 12 // CHECK-LE-NOT: lshr 13 // CHECK-BE: lshr 20 // CHECK-LE-NOT: lshr 21 // CHECK-BE: lshr 28 // CHECK-LE-NOT: lshr 29 // CHECK-BE-NOT: lshr 33 // CHECK-LE-NOT: lshr 34 // CHECK-BE: lshr 41 // CHECK-LE-NOT: lshr 42 // CHECK-BE: lshr [all...] |
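The CHECK-BE lines above look for an lshr because on big-endian PowerPC signbit lowers to bit manipulation of the raw float bits. A hedged sketch of that IR shape for a double (names are illustrative; the test's exact pattern may differ, and the CHECK-LE-NOT lines show that little-endian takes another path):

define i32 @signbit_sketch(double %d) {
entry:
  %bits = bitcast double %d to i64
  %sign = lshr i64 %bits, 63      ; isolate bit 63, the IEEE-754 sign bit
  %res  = trunc i64 %sign to i32
  ret i32 %res
}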
/external/llvm/test/CodeGen/X86/ |
vshift-2.ll | 10 %lshr = lshr <2 x i64> %val, < i64 32, i64 32 > 11 store <2 x i64> %lshr, <2 x i64>* %dst 22 %lshr = lshr <2 x i64> %val, %1 23 store <2 x i64> %lshr, <2 x i64>* %dst 31 %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 > 32 store <4 x i32> %lshr, <4 x i32>* %dst 45 %lshr = lshr <4 x i32> %val, % [all...] |
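These checks exercise uniform vector shifts: when every lane shifts by the same amount, X86 can use a single psrlq/psrld instead of scalarizing. A minimal reduction of the first case (typed-pointer syntax kept to match the excerpt; the function name is made up):

define void @shift_uniform_sketch(<2 x i64> %val, <2 x i64>* %dst) nounwind {
entry:
  ; both lanes shift by 32, so this maps to one psrlq
  %lshr = lshr <2 x i64> %val, <i64 32, i64 32>
  store <2 x i64> %lshr, <2 x i64>* %dst
  ret void
}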
lower-vec-shift.ll | 12 %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> 13 ret <8 x i16> %lshr 29 %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2> 30 ret <8 x i16> %lshr 46 %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2> 47 ret <4 x i32> %lshr 61 %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2 [all...] |
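The shift amounts here are non-uniform but take only two values, grouped by lane, which is the pattern this lowering targets: emit two uniform shifts and blend the halves rather than scalarize. A sketch of the second case (function name illustrative):

define <8 x i16> @two_amounts_sketch(<8 x i16> %a) {
entry:
  ; two distinct amounts (3 and 2), one per half, so the backend can
  ; emit two uniform psrlw shifts and blend the results
  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
  ret <8 x i16> %lshr
}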
2013-01-09-DAGCombineBug.ll | 51 br i1 icmp ult (i64 xor (i64 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl (i384 zext (i64 ptrtoint ([2 x i8]* @global to i64) to i384), i384 192), i384 425269881901436522087161771558896140289), i384 128) to i128), i128 64) to i64) to i192), i192 64), i192 1), i192 128) to i1) to i64), i64 1), i64 1), label %bb2, label %bb3 57 br i1 xor (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl (i384 zext (i64 ptrtoint ([2 x i8]* @global to i64) to i384), i384 192), i384 425269881901436522087161771558896140289), i384 128) to i128), i128 64) to i64) to i192), i192 64), i192 1), i192 128) to i1), i1 trunc (i192 lshr (i192 or (i192 and (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl (i384 zext (i64 ptrtoint ([2 x i8]* @global to i64) to i384), i384 192), i384 425269881901436522087161771558896140289), i384 128) to i128), i128 64) to i64) to i192), i192 64), i192 1), i192 -340282366920938463463374607431768211457), i192 shl (i192 zext (i1 trunc (i192 lshr (i192 or (i192 shl (i192 zext (i64 trunc (i128 lshr (i128 trunc (i384 lshr (i384 or (i384 shl (i38 (…) [all...] |
h-registers-1.ll | 22 %sa = lshr i64 %a, 8 24 %sb = lshr i64 %b, 8 26 %sc = lshr i64 %c, 8 28 %sd = lshr i64 %d, 8 30 %se = lshr i64 %e, 8 32 %sf = lshr i64 %f, 8 34 %sg = lshr i64 %g, 8 36 %sh = lshr i64 %h, 8
|
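An lshr by 8 whose result is only consumed in the low byte selects bits 8..15, which x86 can read directly from an h-register (%ah, %bh, ...) without a real shift. A hedged reduction of one of the eight cases above (the actual test combines them with and/add):

define i8 @second_byte_sketch(i64 %a) {
entry:
  %s = lshr i64 %a, 8
  %t = trunc i64 %s to i8   ; bits 8..15, i.e. %ah when %a sits in %rax
  ret i8 %t
}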
shl_elim.ll | 4 %tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1] 6 %tmp410 = lshr i32 %tmp23, 9 ; <i32> [#uses=1]
|
2011-06-06-fgetsign80bit.ll | 5 %tmp4.lobit = lshr i80 %tmp4, 79
|
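Bit 79 of an x86_fp80 is its sign bit, so fgetsign on the 80-bit type reduces to a single lshr, matching the %tmp4.lobit line above. A sketch under that assumption (function name illustrative):

define i1 @fp80_sign_sketch(x86_fp80 %f) {
entry:
  %bits  = bitcast x86_fp80 %f to i80
  %lobit = lshr i80 %bits, 79     ; bit 79 is the x87 sign bit
  %sign  = trunc i80 %lobit to i1
  ret i1 %sign
}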
shift-i128.ll | 6 %0 = lshr i128 %x, %a
|
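A variable lshr i128 has no single x86 instruction; the usual expansion works on two 64-bit halves and selects on whether the amount crosses 64. A hedged sketch of the low-half computation (names made up; note that a real expansion must also guard %amt == 0, where the shl by 64 below would be poison):

define i64 @lshr128_lo_sketch(i64 %lo, i64 %hi, i64 %amt) {
entry:
  %inv    = sub i64 64, %amt
  %hipart = shl i64 %hi, %inv        ; hi bits sliding into the low half
  %lopart = lshr i64 %lo, %amt
  %narrow = or i64 %lopart, %hipart  ; correct when 0 < %amt < 64
  %big    = sub i64 %amt, 64
  %wide   = lshr i64 %hi, %big       ; correct when %amt >= 64
  %isbig  = icmp uge i64 %amt, 64
  %res    = select i1 %isbig, i64 %wide, i64 %narrow
  ret i64 %res
}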
/external/llvm/test/CodeGen/ARM/ |
fast-isel-shifter.ll | 20 define i32 @lshr() nounwind ssp { 22 ; ARM: lshr 24 %lshr = lshr i32 -1, 2 25 ret i32 %lshr 32 %lshr = lshr i32 %src1, %src2 33 ret i32 %lshr
|
/external/llvm/test/Transforms/InstCombine/ |
2006-11-10-ashr-miscompile.ll | 1 ; RUN: opt < %s -instcombine -S | grep lshr 6 %B = lshr i32 -1, %shift.upgrd.1 ; <i32> [#uses=1]
|
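The miscompile this guards against was instcombine rewriting the logical shift of -1 as an arithmetic one; the grep for lshr confirms the shift stays logical. The two differ on every nonzero amount, e.g. with a shift of 1:

; lshr i32 -1, 1  -->  0x7FFFFFFF  (zero fills the top bit)
; ashr i32 -1, 1  -->  0xFFFFFFFF  (sign bit replicates; still -1)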
udiv-simplify-bug-0.ll | 4 %y = lshr i32 %x, 1 10 %y = lshr i32 %x, 31
|
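The lshr by 31 pins %y to the range {0, 1}, which is what the udiv simplification keys on: an unsigned divide of such a value by any constant greater than 1 folds to 0. A hedged reduction (the test's surrounding code may differ):

define i32 @udiv_after_lshr_sketch(i32 %x) {
entry:
  %y = lshr i32 %x, 31   ; %y is 0 or 1
  %r = udiv i32 %y, 3    ; 0/3 == 1/3 == 0, so this folds to 0
  ret i32 %r
}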
/external/llvm/test/Analysis/CostModel/X86/ |
testshiftlshr.ll | 8 ; SSE2: cost of 4 {{.*}} lshr 12 %0 = lshr %shifttype %a , %b 20 ; SSE2: cost of 16 {{.*}} lshr 24 %0 = lshr %shifttype4i16 %a , %b 32 ; SSE2: cost of 32 {{.*}} lshr 36 %0 = lshr %shifttype8i16 %a , %b 44 ; SSE2: cost of 64 {{.*}} lshr 48 %0 = lshr %shifttype16i16 %a , %b 56 ; SSE2: cost of 128 {{.*}} lshr 60 %0 = lshr %shifttype32i16 %a , % [all...] |
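SSE2 has no variable per-lane 16-bit shift, so the model charges a scalarization cost that doubles with each doubling of the width, which is exactly the 16/32/64/128 progression in the checks above. A reduced form of the 8 x i16 case (RUN line reconstructed in that era's style; treat it as illustrative):

; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2
define <8 x i16> @lshr_8xi16_sketch(<8 x i16> %a, <8 x i16> %b) {
entry:
  ; per the checks above this is charged a cost of 32 under SSE2:
  ; extract, shift, and reinsert each of the 8 lanes
  %0 = lshr <8 x i16> %a, %b
  ret <8 x i16> %0
}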
/external/llvm/test/CodeGen/AMDGPU/ |
bfe_uint.ll | 7 %0 = lshr i32 %x, 5 14 ; since the lshr constant + number of bits in the mask is >= 32, it can also be 15 ; implemented with a LSHR instruction, which is better, because LSHR has less 22 %0 = lshr i32 %x, 16
|
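The comment's arithmetic in concrete form: after lshr i32 %x, 16, only bits 0..15 can be set, so a following 16-bit mask is a no-op (16 shifted bits + 16 mask bits >= 32) and a plain LSHR beats a BFE. A sketch (function name made up):

define i32 @lshr_not_bfe_sketch(i32 %x) {
entry:
  %shr    = lshr i32 %x, 16
  %masked = and i32 %shr, 65535   ; redundant: high bits are already zero
  ret i32 %masked
}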
fp_to_uint.ll | 47 ; EG-DAG: LSHR 57 ; EG-DAG: LSHR 58 ; EG-DAG: LSHR 77 ; EG-DAG: LSHR 87 ; EG-DAG: LSHR 88 ; EG-DAG: LSHR 98 ; EG-DAG: LSHR 108 ; EG-DAG: LSHR 109 ; EG-DAG: LSHR 128 ; EG-DAG: LSHR [all...] |
/external/llvm/test/CodeGen/Thumb2/ |
bfx.ll | 6 %t1 = lshr i32 %a, 7 15 %t1 = lshr i32 %a, 7 24 %t1 = lshr i32 %a, 7
|
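An lshr feeding a contiguous mask is the canonical unsigned bitfield extract, which Thumb-2 folds into a single ubfx. A hedged sketch of the shape these tests check (mask value illustrative; the test's masks vary):

define i32 @ubfx_sketch(i32 %a) {
entry:
  ; bits 7..14 of %a: one ubfx r0, r0, #7, #8 on Thumb-2
  %t1 = lshr i32 %a, 7
  %t2 = and i32 %t1, 255
  ret i32 %t2
}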
thumb2-lsr.ll | 6 %tmp = lshr i32 %a, 13
|
thumb2-lsr2.ll | 6 %tmp = lshr i32 %a, %b
|
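These two tests cover the immediate and register forms of the same lowering: an i32 lshr maps one-to-one onto Thumb-2 LSR. A combined sketch (function name made up):

define i32 @lsr_forms_sketch(i32 %a, i32 %b) {
entry:
  %c = lshr i32 %a, 13   ; immediate form: lsrs r0, r0, #13
  %r = lshr i32 %c, %b   ; register form:  lsrs r0, r0, r1
  ret i32 %r
}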
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
pr23510.ll | 8 ; CHECK: lshr <2 x i64> 9 ; CHECK: lshr <2 x i64> 16 %shr = lshr i64 %tmp, 4 20 %shr2 = lshr i64 %tmp1, 4 28 %shr5 = lshr i64 %tmp4, 4 31 %shr7 = lshr i64 %tmp5, 4
|
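The CHECK lines expect the SLP vectorizer to bundle pairs of scalar lshr-by-4 ops over adjacent memory into one lshr <2 x i64>. A hedged reduction of that pattern (not the test's exact code):

define void @slp_pair_sketch(i64* %p) {
entry:
  %q  = getelementptr inbounds i64, i64* %p, i64 1
  %a  = load i64, i64* %p
  %b  = load i64, i64* %q
  %sa = lshr i64 %a, 4     ; these two scalar shifts over consecutive
  %sb = lshr i64 %b, 4     ; elements become a single lshr <2 x i64>
  store i64 %sa, i64* %p
  store i64 %sb, i64* %q
  ret void
}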
/external/llvm/test/Analysis/ScalarEvolution/ |
2009-07-04-GroupConstantsWidthMismatch.ll | 7 %1 = lshr i16 %0, 8 11 %5 = lshr i8 %4, 4
|
div-overflow.ll | 9 %t1 = lshr i8 %t0, 7
|
/external/clang/test/CodeGen/ |
2007-08-22-CTTZ.c | 6 // CHECK-NOT: lshr
|
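The CHECK-NOT here asserts that __builtin_ctz reaches the backend as a cttz intrinsic rather than being open-coded as a shift-and-test loop, so no lshr should appear. A sketch of the expected shape (function name illustrative; the i1 flag marks zero input as undefined):

declare i32 @llvm.cttz.i32(i32, i1)

define i32 @ctz_sketch(i32 %x) {
entry:
  %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
  ret i32 %r
}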