/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
pr3216.ll | 15 %bf.val.sext = ashr i8 %1, 5
|
vshift_split2.ll | 8 %shr = ashr <8 x i32> %val, < i32 2, i32 2, i32 2, i32 2, i32 4, i32 4, i32 4, i32 4 >
|
2008-05-12-tailmerge-5.ll | 51 %tmp17 = ashr i32 %tmp16, 23 ; <i32> [#uses=1]
54 %sextr = ashr i16 %sextl, 7 ; <i16> [#uses=2]
56 %sextr20 = ashr i16 %sextl19, 7 ; <i16> [#uses=0]
58 %sextr22 = ashr i16 %sextl21, 7 ; <i16> [#uses=1]
87 %tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
89 %sextr41 = ashr i16 %sextl40, 7 ; <i16> [#uses=2]
91 %sextr43 = ashr i16 %sextl42, 7 ; <i16> [#uses=0]
93 %sextr45 = ashr i16 %sextl44, 7 ; <i16> [#uses=1]
108 %tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
110 %sextr57 = ashr i16 %sextl56, 7 ; <i16> [#uses=2 [all...]
|
/external/swiftshader/third_party/LLVM/test/ExecutionEngine/ |
test-shift.ll | 14 %tr1.s = ashr i32 1, %shift.upgrd.5 ; <i32> [#uses=0]
15 %tr2.s = ashr i32 1, 4 ; <i32> [#uses=0]
19 %tr1.l = ashr i64 1, 4 ; <i64> [#uses=0]
21 %tr2.l = ashr i64 1, %shift.upgrd.7 ; <i64> [#uses=0]
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/ |
2009-03-18-vector-ashr-crash.ll | 7 %5 = ashr <4 x i16> %4, <i16 5, i16 5, i16 5, i16 5> ; <<4 x i16>> [#uses=1]
|
apint-select.ll | 17 ;; (x <s 0) ? -1 : 0 -> ashr x, 31
24 ;; (x <s 0) ? -1 : 0 -> ashr x, 31
|
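The apint-select.ll comments above describe the InstCombine fold of a sign-test select into an arithmetic shift right by width-1. A minimal sketch of that pattern in LLVM IR (function and value names here are illustrative, not taken from the test):

    define i32 @sign_splat(i32 %x) {
      %cmp = icmp slt i32 %x, 0              ; x <s 0
      %sel = select i1 %cmp, i32 -1, i32 0   ; -1 when negative, 0 otherwise
      ret i32 %sel                           ; InstCombine can rewrite this as: ashr i32 %x, 31
    }
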
apint-shift.ll | 18 %B = ashr i41 %A, 0 ; <i41> [#uses=1]
24 %B = ashr i39 0, %A ; <i39> [#uses=1]
46 %B = ashr i29 -1, %A ; <i29> [#uses=1]
76 %B = ashr i47 %A, 8 ; <i47> [#uses=1]
83 %B = ashr i18 %a, 8 ; <i18> [#uses=1]
116 %tmp.3 = ashr i84 %X, 4 ; <i84> [#uses=1]
135 %B = ashr i37 %A, 2 ; <i37> [#uses=1]
141 %B = ashr i39 %A, 2 ; <i39> [#uses=1]
147 %B = ashr i13 %A, 12 ; <i13> [#uses=1]
166 %C = ashr i44 %B, 33 ; <i44> [#uses=1] [all...]
|
exact.ll | 11 ; CHECK: ashr exact i32 %x, 3
72 ; CHECK: %B = ashr exact i64 %A, 2
76 %B = ashr i64 %A, 2 ; X/4
85 %A = ashr exact i64 %X, 2 ; X/4
94 %Y = ashr exact i64 %X, 2 ; x / 4
100 ; Make sure we don't transform the ashr here into an sdiv
108 %X = ashr exact i32 %W, 31
|
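The exact.ll matches above depend on the ashr exact flag: when no non-zero bits are shifted out, an arithmetic shift right by N is the same value as a signed division by 2^N. A hedged sketch of that equivalence (illustrative names, not the test's own code):

    define i64 @div_by_four(i64 %x) {
      ; With the low two bits of %x known to be zero, this is equivalent to
      ;   %q = sdiv i64 %x, 4
      %q = ashr exact i64 %x, 2
      ret i64 %q
    }
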
signext.ll | 12 ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
23 ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
54 ; CHECK: %tmp.3 = ashr exact i32 %sext, 24
60 %tmp.4 = ashr i32 %tmp.2, 16 ; <i32> [#uses=1]
64 ; CHECK: %tmp.4 = ashr exact i32 %tmp.2, 16
71 %tmp.5 = ashr i32 %sext1, 16 ; <i32> [#uses=1]
85 ; CHECK: %shr = ashr i32 %x, 5
|
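The signext.ll matches above exercise the shl-then-ashr idiom for sign-extending a narrow field held in a wider register, which InstCombine canonicalizes and marks exact where it can. A minimal illustration of the idiom (names are made up):

    define i32 @sext_low16(i32 %x) {
      %shl = shl i32 %x, 16     ; move the low 16 bits to the top of the register
      %sra = ashr i32 %shl, 16  ; shift back, replicating bit 15 into the upper half
      ret i32 %sra              ; same value as sext(trunc %x to i16) to i32
    }
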
/external/llvm/test/Analysis/ScalarEvolution/ |
undefined.ll | 14 %b = ashr i64 %B, 64
26 %e = ashr i64 %E, -1
|
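The undefined.ll lines above shift by counts that are out of range for the operand type; the LLVM language reference makes the result of an ashr undefined (poison in recent releases) when the shift amount equals or exceeds the bit width. A small sketch of the same situation (illustrative names):

    define i64 @oversized_shifts(i64 %x) {
      %a = ashr i64 %x, 64   ; count equals the bit width: result is undefined
      %b = ashr i64 %x, -1   ; -1 is a huge unsigned count: likewise undefined
      %r = add i64 %a, %b
      ret i64 %r
    }
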
/external/llvm/test/CodeGen/AArch64/ |
arm64-2013-01-23-sext-crash.ll | 7 %B17 = ashr <4 x i32> zeroinitializer, zeroinitializer
21 %B17 = ashr <4 x i32> zeroinitializer, zeroinitializer
|
arm64-clrsb.ll | 13 %shr = ashr i32 %x, 31
27 %shr = ashr i64 %x, 63
|
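In arm64-clrsb.ll the ashr by width-1 splats the sign bit across the register as part of a count-leading-redundant-sign-bits computation that should select the AArch64 CLS instruction. One common way to write that computation, sketched here with made-up names rather than the test's exact body:

    declare i32 @llvm.ctlz.i32(i32, i1)

    define i32 @clrsb32(i32 %x) {
      %sign = ashr i32 %x, 31                              ; 0 or -1: the sign bit, splatted
      %diff = xor i32 %x, %sign                            ; clears the redundant sign bits
      %clz  = call i32 @llvm.ctlz.i32(i32 %diff, i1 false) ; leading zeros, defined for 0
      %res  = sub i32 %clz, 1                              ; redundant sign bits of %x
      ret i32 %res
    }
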
arm64-coalesce-ext.ll | 9 %F = ashr i64 %E, 32
|
/external/llvm/test/CodeGen/AMDGPU/ |
mad_int24.ll | 17 %a_24 = ashr i32 %0, 8
19 %b_24 = ashr i32 %1, 8
|
mul_int24.ll | 17 %a_24 = ashr i32 %0, 8
19 %b_24 = ashr i32 %1, 8
|
sdivrem64.ll | 163 %1 = ashr i64 %x, 33
164 %2 = ashr i64 %y, 33
180 %1 = ashr i64 %x, 33
181 %2 = ashr i64 %y, 33
200 %1 = ashr i64 %x, 40
201 %2 = ashr i64 %y, 40
220 %1 = ashr i64 %x, 40
221 %2 = ashr i64 %y, 40
|
/external/llvm/test/CodeGen/X86/ |
avx512-shift.ll | 12 %d = ashr <16 x i32> %c, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
24 %d = ashr <8 x i64> %c, <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>
36 %d = ashr <4 x i64> %c, <i64 12, i64 12, i64 12, i64 12>
76 %k = ashr <16 x i32> %x, %y
84 %k = ashr <8 x i64> %x, %y
92 %k = ashr <4 x i64> %x, %y
100 %k = ashr <8 x i16> %x, %y
109 %k = ashr <16 x i32> %x, %y1
|
2008-05-12-tailmerge-5.ll | 51 %tmp17 = ashr i32 %tmp16, 23 ; <i32> [#uses=1]
54 %sextr = ashr i16 %sextl, 7 ; <i16> [#uses=2]
56 %sextr20 = ashr i16 %sextl19, 7 ; <i16> [#uses=0]
58 %sextr22 = ashr i16 %sextl21, 7 ; <i16> [#uses=1]
87 %tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
89 %sextr41 = ashr i16 %sextl40, 7 ; <i16> [#uses=2]
91 %sextr43 = ashr i16 %sextl42, 7 ; <i16> [#uses=0]
93 %sextr45 = ashr i16 %sextl44, 7 ; <i16> [#uses=1]
108 %tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
110 %sextr57 = ashr i16 %sextl56, 7 ; <i16> [#uses=2 [all...]
|
shift-combine.ll | 25 %shr = ashr exact i32 %sub, 3
35 %shr = ashr exact i32 %sub, 3
45 %shr = ashr exact i32 %sub, 2
|
/external/swiftshader/third_party/LLVM/test/Analysis/ScalarEvolution/ |
undefined.ll | 14 %b = ashr i64 %B, 64
26 %e = ashr i64 %E, -1
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstSimplify/ |
exact-nsw-nuw.ll | 25 %C = ashr exact i32 %A, %B
42 %D = ashr i32 %C, %B
|
/external/llvm/test/Analysis/CostModel/AMDGPU/ |
shifts.ll | 43 ; ALL: estimated cost of 1 for {{.*}} ashr i32
46 %or = ashr i32 %vec, %b
52 ; FAST64: estimated cost of 2 for {{.*}} ashr i64
53 ; SLOW64: estimated cost of 3 for {{.*}} ashr i64
56 %or = ashr i64 %vec, %b
|
/external/clang/test/OpenMP/ |
atomic_read_codegen.c | 243 // CHECK: ashr i32 [[SHL]], 1
251 // CHECK: ashr i32 [[SHL]], 1
258 // CHECK: ashr i32 [[LD]], 31
265 // CHECK: ashr i8 [[LD]], 7
273 // CHECK: ashr i32 [[SHL]], 18
281 // CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
282 // CHECK: sext i24 [[ASHR]] to i32
290 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 6 [all...]
|
/external/llvm/test/Transforms/InstCombine/ |
signext.ll | 12 ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
23 ; CHECK: %tmp.3 = ashr exact i32 %sext, 16
44 ; CHECK: %tmp.3 = ashr exact i32 %sext, 24
50 %tmp.4 = ashr i32 %tmp.2, 16 ; <i32> [#uses=1]
54 ; CHECK: %tmp.4 = ashr exact i32 %tmp.2, 16
61 %tmp.5 = ashr i32 %sext1, 16 ; <i32> [#uses=1]
75 ; CHECK: %sub = ashr i32 %x, 5
|
/external/llvm/test/CodeGen/PowerPC/ |
reloc-align.ll | 29 %bf.ashr = ashr i64 %bf.shl, 54
30 %bf.cast = trunc i64 %bf.ashr to i32
|