/external/llvm/test/CodeGen/MSP430/
  shifts.ll
    17: %shr = ashr i8 %a, %cnt
    41: %shr = ashr i16 %a, %cnt

/external/llvm/test/CodeGen/PowerPC/
  ifcvt.ll
    8: %conv29 = ashr exact i32 %sext82, 16
    29: %conv39 = ashr exact i32 %sext83, 16

/external/llvm/test/MC/Mips/
  mips64shift.ll
    17: %shr = ashr i64 %a0, 10
    38: %shr = ashr i64 %a0, 40

/external/llvm/test/Transforms/InstSimplify/
  exact-nsw-nuw.ll
    25: %C = ashr exact i32 %A, %B
    42: %D = ashr i32 %C, %B
  shift-128-kb.ll
    15: ; CHECK-NEXT: [[SHR:%.*]] = ashr i128 [[SHL]], [[SH_PROM]]
    26: %shr = ashr i128 %shl, %sh_prom

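All of these hits exercise LLVM's arithmetic shift right (ashr) instruction. As a reminder of the semantics the tests pin down, here is a minimal standalone sketch, not taken from any file listed here (the function name @ashr_demo is made up):

    define i32 @ashr_demo(i32 %a) {
      ; ashr shifts right while replicating the sign bit, e.g.
      ; ashr i32 -16, 3 yields -2 rather than a large positive value
      %plain = ashr i32 %a, 3
      ; "exact" asserts the shifted-out bits are zero; if they are not,
      ; the result is poison, which is what lets InstSimplify fold the
      ; exact-nsw-nuw.ll patterns above more aggressively
      %ex = ashr exact i32 %plain, 1
      ret i32 %ex
    }
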
/external/swiftshader/third_party/LLVM/test/CodeGen/MSP430/
  shifts.ll
    17: %shr = ashr i8 %a, %cnt
    41: %shr = ashr i16 %a, %cnt

/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
  vec-sign.ll
    9: %b.lobit = ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
    23: %b.lobit = ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
  masked-iv-unsafe.ll
    72: %indvar.i8 = ashr i64 %s0, 8
    78: %indvar.i24 = ashr i64 %s1, 24
    102: %indvar.i8 = ashr i64 %s0, 8
    108: %indvar.i24 = ashr i64 %s1, 24
    188: %indvar.i8 = ashr i64 %s0, 8
    194: %indvar.i24 = ashr i64 %s1, 24
    218: %indvar.i8 = ashr i64 %s0, 8
    224: %indvar.i24 = ashr i64 %s1, 24
    332: %indvar.i8 = ashr i64 %s0, 8
    338: %indvar.i24 = ashr i64 %s1, 2 [all...]

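The vec-sign.ll hits above use an idiom worth naming: an arithmetic shift by bit-width minus one turns each lane into an all-zeros or all-ones sign mask. A minimal sketch (hypothetical function name; each lane becomes 0 for a non-negative input and -1 for a negative one):

    define <4 x i32> @sign_mask(<4 x i32> %b) {
      %b.lobit = ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
      ret <4 x i32> %b.lobit
    }

On SSE2-class x86 targets a splat-constant vector ashr like this typically lowers to a single psrad, which is what the sse2-vector-shifts.ll entries further down exercise.
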
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/
  xor2.ll
    47: %2 = ashr i32 %1, %B
    51: ; CHECK: %1 = ashr i32 %A, %B

/external/swiftshader/third_party/LLVM/test/CodeGen/SystemZ/
  04-RetShifts.ll
    11: %shr = ashr i32 %a, %add ; <i32> [#uses=1]
    32: %shr = ashr i64 %a, %add ; <i64> [#uses=1]
    52: %shr = ashr i32 %a, 1
    70: %shr = ashr i32 %a, %idx
    88: %shr = ashr i64 %a, 1
    106: %shr = ashr i64 %a, %idx

/external/llvm/test/CodeGen/AArch64/
  arm64-shifted-sext.ll
    72: %shr = ashr i32 %conv, 4
    95: %shr = ashr i32 %conv, 8
    117: %shr = ashr i64 %conv, 4
    140: %shr = ashr i64 %conv, 8
    162: %shr = ashr i32 %conv, 4
    185: %shr = ashr i32 %conv, 16
    207: %shr = ashr i64 %conv, 4
    230: %shr = ashr i64 %conv, 16
    252: %shr = ashr i64 %conv, 4
    275: %shr = ashr i64 %conv, 3 [all...]
  addsub-shifted.ll
    137: %shift1 = ashr i32 %rhs32, 18
    142: %shift2 = ashr i32 %rhs32, 31
    147: %shift3 = ashr i32 %rhs32, 5
    153: %shift4 = ashr i32 %rhs32, 19
    158: %shift4a = ashr i32 %lhs32, 15
    163: %shift5 = ashr i64 %rhs64, 18
    168: %shift6 = ashr i64 %rhs64, 31
    173: %shift7 = ashr i64 %rhs64, 5
    179: %shift8 = ashr i64 %rhs64, 19
    184: %shift8a = ashr i64 %lhs64, 4 [all...]
  fast-isel-shift.ll
    7: %2 = ashr i16 %1, 1
    16: %2 = ashr i16 %1, 1
    24: %2 = ashr i32 %1, 1
    32: %2 = ashr i32 %1, 1
    40: %2 = ashr i64 %1, 1
    48: %2 = ashr i64 %1, 1
    401: %1 = ashr i8 %a, %b
    408: %1 = ashr i8 %a, 4
    416: %2 = ashr i16 %1, 4
    424: %2 = ashr i16 %1, [all...]
  arm64-long-shift.ll
    24: define i128 @ashr(i128 %r, i128 %s) nounwind readnone {
    25: ; CHECK-LABEL: ashr:
    42: %shr = ashr i128 %r, %s
  arm64-vshr.ll
    15: %shr = ashr <8 x i16> %0, %1
    30: %shr = ashr <4 x i32> %0, %1
    52: %tmp3 = ashr <1 x i64> %A, < i64 63 >
  bitfield-extract.ll
    6: %tmp = ashr i32 %a, 23
    15: %tmp = ashr i32 %a, 23
    63: %tmp = ashr i32 %a, 25

/external/llvm/test/CodeGen/X86/
  peep-test-4.ll
    30: %ashr = ashr i32 %x, 1
    31: %cmp = icmp eq i32 %ashr, 0
    35: tail call void @foo(i32 %ashr)
    48: %ashr = lshr i32 %x, 1
    49: %cmp = icmp eq i32 %ashr, 0
    53: tail call void @foo(i32 %ashr)
    66: %ashr = lshr i32 %x, 3
    67: %cmp = icmp eq i32 %ashr, 0
    71: tail call void @foo(i32 %ashr) [all...]
  sse2-vector-shifts.ll
    100: %shl = ashr <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
    110: %shl = ashr <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
    120: %shl = ashr <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
    129: %shl = ashr <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
    139: %shl = ashr <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
    149: %shl = ashr <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
    247: %sra0 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
    248: %sra1 = ashr <4 x i32> %sra0, <i32 4, i32 4, i32 4, i32 4>
    277: %sra = ashr <4 x i32> %x, %y
    297: %shl0 = ashr <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4 [all...]

/external/llvm/test/Transforms/InstCombine/
  apint-shift.ll
    23: %B = ashr i41 %A, 0 ; <i41> [#uses=1]
    31: %B = ashr i39 0, %A ; <i39> [#uses=1]
    69: %B = ashr i29 -1, %A ; <i29> [#uses=1]
    110: %B = ashr i47 %A, 8 ; <i47> [#uses=1]
    120: %B = ashr i18 %a, 8 ; <i18> [#uses=1]
    163: %tmp.3 = ashr i84 %X, 4 ; <i84> [#uses=1]
    188: %B = ashr i37 %A, 2 ; <i37> [#uses=1]
    196: %B = ashr i39 %A, 2 ; <i39> [#uses=1]
    204: %B = ashr i13 %A, 12 ; <i13> [#uses=1]
    229: %C = ashr i44 %B, 33 ; <i44> [#uses=1 [all...]
  shift.ll
    27: %B = ashr i32 %A, 0 ; <i32> [#uses=1]
    36: %B = ashr i32 0, %shift.upgrd.2 ; <i32> [#uses=1]
    103: %B = ashr i32 undef, 2 ;; top two bits must be equal, so not undef
    111: %B = ashr i32 undef, %A ;; top %A bits must be equal, so not undef
    140: %B = ashr i32 -1, %shift.upgrd.3 ;; Always equal to -1
    223: %B = ashr i32 %A, 8 ; <i32> [#uses=1]
    239: %B = ashr i8 %a, 3 ; <i8> [#uses=1]
    250: %B = ashr exact i8 %a, 3
    307: %tmp.3 = ashr i32 %X, 4
    340: %B = ashr i32 %A, 2 ; <i32> [#uses=1 [all...]

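The shift.ll comments at lines 103 and 111 record why InstCombine cannot simplify an ashr of undef all the way to undef: the shifted-in bits are copies of the sign bit, so the top bits of the result are forced to be equal, a constraint plain undef cannot express. A worked pair of constants (illustrative values only) shows the replication:

    %a = ashr i32 -8, 2 ; 0xFFFFFFF8 -> 0xFFFFFFFE = -2 (top bits all ones)
    %b = ashr i32 8, 2  ; 0x00000008 -> 0x00000002 = 2 (top bits all zeros)
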
/external/llvm/test/CodeGen/AMDGPU/
  fp_to_uint.ll
    50: ; EG-DAG: ASHR
    80: ; EG-DAG: ASHR
    101: ; EG-DAG: ASHR
    131: ; EG-DAG: ASHR
    152: ; EG-DAG: ASHR
    173: ; EG-DAG: ASHR
    194: ; EG-DAG: ASHR

/external/llvm/test/Analysis/ScalarEvolution/
  shift-op.ll
    46: %iv.shift = ashr i32 %iv, 1
    63: %iv.shift = ashr i32 %iv, 1
    80: %iv.shift = ashr i32 %iv, 1
    99: %iv.shift = ashr i32 %iv, 1
    154: %iv.shift = ashr i32 %iv, 1

/external/llvm/test/CodeGen/Mips/
  mips64shift.ll
    14: %shr = ashr i64 %a0, %a1
    35: %shr = ashr i64 %a0, 10
    56: %shr = ashr i64 %a0, 40

/external/llvm/test/CodeGen/SystemZ/
  shift-10.ll
    18: ; ashr/sext pair.
    76: %shr = ashr i64 %shl, 60
    88: %sext = ashr i32 %1, 31

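The "ashr/sext pair" that shift-10.ll mentions is the classic shl+ashr idiom for sign-extending a narrow field in place, as in the shift-by-60 match at line 76. A hypothetical sketch (function name made up):

    define i64 @sext_low4(i64 %x) {
      %shl = shl i64 %x, 60    ; move the low 4 bits to the top
      %shr = ashr i64 %shl, 60 ; shift back, replicating the sign bit
      ret i64 %shr             ; same as sign-extending the 4-bit field
    }

The ashr-by-31 at line 88 is the scalar form of the sign-mask trick noted under vec-sign.ll above.
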
/external/swiftshader/third_party/LLVM/test/CodeGen/Mips/
  mips64shift.ll
    13: %shr = ashr i64 %a0, %a1
    34: %shr = ashr i64 %a0, 10
    55: %shr = ashr i64 %a0, 40