/external/llvm/test/Transforms/BBVectorize/X86/
  sh-rec3.ll
     24  %conv1 = lshr i32 %1, 2
     34  %2 = lshr i64 %0, 50
     70  %8 = lshr i32 %7, 16
     86  %shr17913 = lshr i32 %conv17826, 1
    115  %15 = lshr i64 %14, 32
    135  %shr29420 = lshr i32 %conv29327, 1
    159  %22 = lshr i32 %21, 17

/external/llvm/test/CodeGen/X86/
  2009-06-02-RewriterBug.ll
     28  %shr85 = lshr i64 %conv31, 25 ; <i64> [#uses=0]
     63  %and445 = lshr i64 %add365, 2 ; <i64> [#uses=1]
     87  %and572 = lshr i64 %add479, 22 ; <i64> [#uses=1]
    124  %and787 = lshr i64 %add707, 2 ; <i64> [#uses=1]
    189  %shr = lshr i64 %conv31, 6 ; <i64> [#uses=1]
    191  %shr85 = lshr i64 %conv31, 25 ; <i64> [#uses=0]
    220  %and230 = lshr i64 %add137, 22 ; <i64> [#uses=1]
    289  %shr1444 = lshr i64 %conv1439, 7 ; <i64> [#uses=1]
    291  %shr1450 = lshr i64 %conv1439, 18 ; <i64> [#uses=1]
    293  %shr1454 = lshr i64 %conv1439, 3 ; <i64> [#uses=1
    [all...]

  avg.ll
     35  %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>
     71  %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
     97  %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    124  %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    143  %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    179  %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>
    205  %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    232  %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    251  %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
    287  %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1
    [all...]

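Every avg.ll match shifts a widened sum right by one, which looks like the halving step of a rounding-average pattern, (a+b+1)>>1. A minimal hypothetical sketch of that idiom in LLVM IR (function name, element count, and value names invented for illustration, not taken from the test):

  ; hypothetical reduced sketch of the rounding-average idiom
  define <4 x i8> @avg_sketch(<4 x i8> %a, <4 x i8> %b) {
    %za = zext <4 x i8> %a to <4 x i32>                       ; widen so the sum cannot wrap
    %zb = zext <4 x i8> %b to <4 x i32>
    %sum = add <4 x i32> %za, %zb
    %rnd = add <4 x i32> %sum, <i32 1, i32 1, i32 1, i32 1>   ; +1 for rounding
    %avg = lshr <4 x i32> %rnd, <i32 1, i32 1, i32 1, i32 1>  ; halve: (a+b+1)>>1
    %res = trunc <4 x i32> %avg to <4 x i8>
    ret <4 x i8> %res
  }
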
  bmi.ll
    115  %1 = lshr i32 %x, 4
    124  %2 = lshr i32 %1, 4
    141  %1 = lshr i64 %x, 4
    150  %2 = lshr i64 %1, 4
    159  %shr = lshr i32 %x, 2
    169  %shr = lshr i64 %x, 2

  lower-vec-shift-2.ll
     79  %shr = lshr <8 x i16> %A, %vecinit14
     99  %shr = lshr <4 x i32> %A, %vecinit6
    115  %shr = lshr <2 x i64> %A, %vecinit2

  shrink-compare.ll
     76  %bf.lshr = lshr i56 %bf.load, 32
     77  %bf.cast = trunc i56 %bf.lshr to i32

/external/llvm/test/Transforms/InstCombine/
  cast.ll
    285  %tmp21 = lshr i32 %c1, 8 ; <i32> [#uses=1]
    288  ; CHECK: %tmp21 = lshr i16 %a, 8
    294  %tmp2 = lshr i16 %c1, 8 ; <i16> [#uses=1]
    297  ; CHECK: %tmp2 = lshr i16 %a, 8
    304  %b = lshr i32 %a, 31
    314  %b = lshr i32 %a, 31
    335  %tmp21 = lshr i32 %tmp, 8
    347  %tmp21 = lshr i32 %tmp, 9
    353  ; CHECK: %tmp21 = lshr i16 %a, 9
    483  %a = lshr i64 %A,
    [all...]

/external/llvm/test/CodeGen/AMDGPU/
  sext-in-reg.ll
     15  ; EG: LSHR * [[ADDR]]
     33  ; EG-NEXT: LSHR * [[ADDR]]
     51  ; EG-NEXT: LSHR * [[ADDR]]
     69  ; EG-NEXT: LSHR * [[ADDR]]
    105  ; EG: LSHR
    106  ; EG: LSHR
    130  ; EG: LSHR
    131  ; EG: LSHR
    155  ; EG: LSHR
    156  ; EG: LSHR
    [all...]

  llvm.AMDGPU.read.workdim.ll
     30  %shr = lshr i32 %shl, 24

  no-shrink-extloads.ll
    104  %srl = lshr i64 %arg, 32
    118  %srl = lshr i64 %load, 32
    151  %srl = lshr i64 %arg, 32
    165  %srl = lshr i64 %load, 32

/external/llvm/test/CodeGen/AArch64/
  arm64-vecFold.ll
      5  %vshrn_low_shift = lshr <8 x i16> %a0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
      7  %vshrn_high_shift = lshr <8 x i16> %b0, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
     21  %vshrn_low_shift = lshr <4 x i32> %a0, <i32 5, i32 5, i32 5, i32 5>
     23  %vshrn_high_shift = lshr <4 x i32> %b0, <i32 5, i32 5, i32 5, i32 5>
     37  %vshrn_low_shift = lshr <2 x i64> %a0, <i64 5, i64 5>
     39  %vshrn_high_shift = lshr <2 x i64> %b0, <i64 5, i64 5>

  fold-constants.ll
     28  %vsra_n = lshr <8 x i8> %vclz_v.i, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>

/external/llvm/test/CodeGen/Hexagon/
  circ_st.ll
     22  %shr2 = lshr i32 %conv, 1
     38  %shr1 = lshr i32 %conv, 1
     90  %shr1 = lshr i32 %conv, 1

  expand-condsets-rm-segment.ll
     86  %shr = lshr i64 %add, 16
     90  %shr36 = lshr i64 %add35, 16
     97  %shr42 = lshr i64 %add41, 32

  sube.ll
     25  %tmp21 = lshr i128 %tmp15, 64

/external/llvm/test/Transforms/FunctionAttrs/
  nocapture.ll
     25  %tmp2 = lshr i32 %tmp, %bitno
     39  %tmp2 = lshr i32 %tmp, %bitno
     65  %tmp2 = lshr i32 %tmp, %bitno

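The nocapture.ll matches shift by a variable amount, the usual single-bit-test idiom: shift the word right by the bit index, then mask with 1. A hypothetical sketch (function and value names invented for illustration):

  ; hypothetical sketch of a variable-amount bit test
  define i1 @bit_test(i32 %word, i32 %bitno) {
    %shifted = lshr i32 %word, %bitno   ; bring the target bit down to bit 0
    %bit = and i32 %shifted, 1          ; mask off everything else
    %set = icmp ne i32 %bit, 0
    ret i1 %set
  }
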
/external/llvm/test/Analysis/CFLAliasAnalysis/
  full-store-partial-alias.ll
     30  %tmp5.lobit = lshr i32 %tmp5, 31

/external/llvm/test/Analysis/ScalarEvolution/
  avoid-infinite-recursion-0.ll
     13  %3 = lshr i64 %2, 3 ; <i64> [#uses=1]

/external/llvm/test/CodeGen/ARM/
  2007-04-30-CombinerCrash.ll
     19  %tmp59 = lshr i64 %tmp58, 8 ; <i64> [#uses=1]

  2008-11-18-ScavengerAssert.ll
     10  %1 = lshr i64 %v, 32 ; <i64> [#uses=1]

  2012-01-23-PostRA-LICM.ll
     25  %tmp9 = fsub <4 x float> %tmp8, bitcast (i128 or (i128 shl (i128 zext (i64 trunc (i128 lshr (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128), i128 64) to i64) to i128), i128 64), i128 zext (i64 trunc (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128) to i64) to i128)) to <4 x float>)
     29  %tmp13 = lshr i128 %tmp12, 64
     50  %tmp33 = fsub <4 x float> %tmp32, bitcast (i128 or (i128 shl (i128 zext (i64 trunc (i128 lshr (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128), i128 64) to i64) to i128), i128 64), i128 zext (i64 trunc (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128) to i64) to i128)) to <4 x float>)
     81  %tmp64 = lshr i128 %tmp63, 64

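The 2012-01-23-PostRA-LICM.ll constants above bitcast a <4 x float> to i128 and use lshr by 64 to move the vector's high half down into the low bits. A minimal hypothetical sketch of that half-extraction idiom (function and value names invented for illustration):

  ; hypothetical sketch: extract the high 64 bits of a 128-bit vector
  define i64 @high_half(<4 x float> %v) {
    %bits = bitcast <4 x float> %v to i128   ; reinterpret the vector as one integer
    %shifted = lshr i128 %bits, 64           ; zero-fill shift moves the high half down
    %hi = trunc i128 %shifted to i64
    ret i64 %hi
  }
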
  ldr.ll
     66  %tmp1 = lshr i32 %offset, 2

  pr3502.ll
     16  %2 = lshr i32 %1, 20 ; <i32> [#uses=1]

/external/llvm/test/CodeGen/Generic/
  2006-06-13-ComputeMaskedBitsCrash.ll
     24  %tmp147 = lshr i32 %tmp146, 31 ; <i32> [#uses=1]

/external/llvm/test/CodeGen/NVPTX/
  rotate.ll
     55  %t1 = lshr i32 %x, 24

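For reference, every match above uses LLVM IR's lshr (logical shift right), which zero-fills the vacated high bits, unlike ashr, which copies the sign bit. A minimal hypothetical example:

  ; hypothetical example of lshr semantics
  define i32 @lshr_demo(i32 %x) {
    %r = lshr i32 %x, 4   ; zero-fill shift: the unsigned quotient %x / 16
    ret i32 %r
  }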