/external/llvm/test/CodeGen/MSP430/ |
2009-05-17-Shift.ll | 8 %2 = lshr i16 %1, 2
|
/external/llvm/test/CodeGen/Mips/ |
rotate.ll |
    10  %shr = lshr i32 %a, %sub
    20  %shr = lshr i32 %a, 22
    29  %shr = lshr i32 %a, %b
    40  %shr = lshr i32 %a, 10
|
mips64shift.ll |
    20  %shr = lshr i64 %a0, %a1
    41  %shr = lshr i64 %a0, 10
    62  %shr = lshr i64 %a0, 40
    70  %shr = lshr i64 %a0, %a1
    83  %shr = lshr i64 %a0, %sub
    91  %shr = lshr i64 %a0, 10
    101  %shr = lshr i64 %a0, 54
|
/external/llvm/test/CodeGen/PowerPC/ |
shift128.ll | 4 %r = lshr i128 %x, %y
|
/external/llvm/test/CodeGen/SystemZ/ |
vec-shift-03.ll |
    10  %ret = lshr <16 x i8> %val1, %val2
    19  %ret = lshr <8 x i16> %val1, %val2
    28  %ret = lshr <4 x i32> %val1, %val2
    37  %ret = lshr <2 x i64> %val1, %val2
|
shift-02.ll |
    10  %shift = lshr i32 %a, 1
    19  %shift = lshr i32 %a, 31
    28  %shift = lshr i32 %a, 32
    38  %shift = lshr i32 %a, %sub
    47  %shift = lshr i32 %a, %amt
    57  %shift = lshr i32 %a, %add
    68  %shift = lshr i32 %a, %trunc
    79  %shift = lshr i32 %a, %add
    90  %shift = lshr i32 %a, %add
    101  %shift = lshr i32 %a, %ad [all...]
shift-08.ll |
    11  %partb = lshr i64 %a, 63
    22  %partb = lshr i64 %a, 1
    33  %partb = lshr i64 %a, 0
    45  %partb = lshr i64 %a, %amtb
    58  %partb = lshr i64 %a, %sub
    73  %partb = lshr i64 %a, %subext
    88  %partb = lshr i64 %a, %subext
    102  %partb = lshr i64 %a, %sub
    117  %partb = lshr i64 %a, %sub
    130  %partb = lshr i64 %a, %sub [all...]
/external/llvm/test/CodeGen/X86/ |
ptr-rotate.ll | 6 %shr = lshr i32 %tmp, 5
|
wide-integer-fold.ll | 9 %u = lshr i192 %s, 128
|
constant-hoisting-shift-immediate.ll |
    5  ; have %lshr2 = lshr i192 %data2, %const, and the definition of %const would
    10  %lshr1 = lshr i192 %data1, 128
    18  %lshr2 = lshr i192 %data2, 128
|
pr13220.ll |
    5  %a = lshr <8 x i96> %x, <i96 1, i96 1, i96 1, i96 1, i96 1, i96 1, i96 1, i96 1>
    11  %a = lshr <8 x i97> %x, <i97 1, i97 1, i97 1, i97 1, i97 1, i97 1, i97 1, i97 1>
    17  %a = lshr <8 x i96> <i96 4, i96 4, i96 4, i96 4, i96 4, i96 4, i96 4, i96 4>, <i96 1, i96 1, i96 1, i96 1, i96 1, i96 1, i96 1, i96 1>
|
rotate.ll |
    9  %C = lshr i32 %A, %shift.upgrd.2 ; <i32> [#uses=1]
    16  %B = lshr i32 %A, %shift.upgrd.3 ; <i32> [#uses=1]
    26  %C = lshr i32 %A, 27 ; <i32> [#uses=1]
    32  %B = lshr i32 %A, 5 ; <i32> [#uses=1]
    43  %C = lshr i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
    50  %B = lshr i16 %A, %shift.upgrd.7 ; <i16> [#uses=1]
    60  %C = lshr i16 %A, 11 ; <i16> [#uses=1]
    66  %B = lshr i16 %A, 5 ; <i16> [#uses=1]
    75  %C = lshr i8 %A, %Amt2 ; <i8> [#uses=1]
    81  %B = lshr i8 %A, %Amt ; <i8> [#uses=1 [all...]
h-register-addressing-32.ll |
    6  %t0 = lshr i32 %x, 8
    16  %t0 = lshr i32 %x, 8
    26  %t0 = lshr i32 %x, 8
    36  %t0 = lshr i32 %x, 8
    46  %t0 = lshr i32 %x, 5
    56  %t0 = lshr i32 %x, 6
    66  %t0 = lshr i32 %x, 7
|
h-register-addressing-64.ll |
    6  %t0 = lshr i64 %x, 8
    16  %t0 = lshr i64 %x, 8
    26  %t0 = lshr i64 %x, 8
    36  %t0 = lshr i64 %x, 8
    46  %t0 = lshr i64 %x, 5
    56  %t0 = lshr i64 %x, 6
    66  %t0 = lshr i64 %x, 7
|
/external/llvm/test/Transforms/ConstProp/ |
shift.ll |
    5  %r1 = lshr i64 -1, 4294967296 ; 2^32
    22  %r1 = lshr i65 2, 18446744073709551617
    39  %r1 = lshr i256 2, 18446744073709551617
    56  %r1 = lshr i511 -1, 1208925819614629174706276 ; 2^80 + 100
|
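Note on the ConstProp cases above: each one shifts by an amount that is at least as large as the bit width being shifted, so the constant folder is expected to reduce the result to undef rather than emit a real shift. A minimal sketch of the same pattern, with an illustrative function name not taken from the test:

    ; shift amount equals the bit width of i64, so the result is undefined
    ; and constant folding / -instsimplify is expected to fold the body away
    define i64 @fold_overwide_lshr() {
      %r = lshr i64 -1, 64
      ret i64 %r            ; expected to become: ret i64 undef
    }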
/external/llvm/test/Transforms/InstSimplify/ |
exact-nsw-nuw.ll |
    8  %C = lshr exact i32 %A, %B
    14  ; CHECK: lshr
    17  %C = lshr i32 %A, %B
    34  %D = lshr i32 %C, %B
|
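The exact flag seen in the InstSimplify test above asserts that the lshr drops no set bits; if any non-zero bit would be shifted out, the result is poison. That guarantee is what makes an exact lshr invertible. A small illustrative sketch under that reading (names are not from the test):

    ; with "exact", the low bits are known to be zero, so shifting back
    ; left is expected to simplify to the original value
    define i32 @roundtrip_exact_lshr(i32 %a) {
      %s = lshr exact i32 %a, 3   ; poison if any of the low 3 bits of %a were set
      %r = shl i32 %s, 3          ; expected to fold back to %a
      ret i32 %r
    }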
/external/llvm/test/Analysis/CostModel/X86/ |
arith.ll |
    86  ; AVX: cost of 2 {{.*}} lshr
    87  ; AVX2: cost of 1 {{.*}} lshr
    88  %B0 = lshr <4 x i32> undef, undef
    89  ; AVX: cost of 2 {{.*}} lshr
    90  ; AVX2: cost of 1 {{.*}} lshr
    91  %B1 = lshr <2 x i64> undef, undef
    113  ; AVX: cost of 2 {{.*}} lshr
    114  ; AVX2: cost of 1 {{.*}} lshr
    115  %B0 = lshr <8 x i32> undef, undef
    116  ; AVX: cost of 2 {{.*}} lshr [all...]
/external/llvm/test/CodeGen/ARM/ |
ifconv-regmask.ll |
    11  %bf.lshr = lshr i32 %bf.load, 26
    12  %bf.clear = and i32 %bf.lshr, 7
|
/external/llvm/test/CodeGen/Mips/Fast-ISel/ |
shift.ll |
    13  %bf.lshr = lshr i32 %bf.load, 2
    14  %cmp = icmp ne i32 %bf.lshr, 2
|
/external/llvm/test/Transforms/InstCombine/ |
apint-shift-simplify.ll |
    15  %X = lshr i57 %A, %C
    16  %Y = lshr i57 %B, %C
    21  ; CHECK-NEXT: lshr i57
|
div-shift.ll |
    7  ; CHECK: lshr i32 %conv
    18  ; CHECK: lshr i64 %x
    31  ; CHECK-NEXT: %3 = lshr i64 %x, %2
    44  ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 %x, [[SEL]]
    57  ; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 %V, [[SEL1]]
    58  ; CHECK-NEXT: [[SEL2:%.*]] = select i1 %y, i32 [[LSHR]], i32 0
|
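The div-shift.ll CHECK lines above come from InstCombine rewriting an unsigned division by a power of two into a logical right shift. A hedged sketch of the kind of input involved (hypothetical names, simplified from the test):

    ; udiv by (1 << %y); InstCombine is expected to turn the division
    ; into an lshr of %x by the (zero-extended) shift amount
    define i64 @udiv_by_pow2(i64 %x, i32 %y) {
      %shift  = shl i32 1, %y
      %amount = zext i32 %shift to i64
      %q      = udiv i64 %x, %amount
      ret i64 %q
    }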
/external/llvm/test/CodeGen/AMDGPU/ |
fp_to_sint.ll |
    56  ; EG-DAG: LSHR
    66  ; EG-DAG: LSHR
    67  ; EG-DAG: LSHR
    88  ; EG-DAG: LSHR
    98  ; EG-DAG: LSHR
    99  ; EG-DAG: LSHR
    109  ; EG-DAG: LSHR
    119  ; EG-DAG: LSHR
    120  ; EG-DAG: LSHR
    139  ; EG-DAG: LSHR [all...]
/external/llvm/test/Bitcode/ |
bitwiseInstructions.3.2.ll |
    25  define void @lshr(i8 %x1){
    27  ; CHECK: %res1 = lshr i8 %x1, %x1
    28  %res1 = lshr i8 %x1, %x1
    30  ; CHECK: %res2 = lshr exact i8 %x1, %x1
    31  %res2 = lshr exact i8 %x1, %x1
|
/external/llvm/test/CodeGen/AArch64/ |
arm64-extract.ll |
    7  %right = lshr i64 %in, 45
    16  %right = lshr i32 %in, 23
    25  %right = lshr i32 %rhs, 26
    36  %right = lshr i64 %rhs, 40
    52  %sh2 = lshr i32 %b, 14
|
extract.ll |
    6  %right = lshr i64 %in, 45
    15  %right = lshr i32 %in, 23
    24  %right = lshr i32 %rhs, 26
    35  %right = lshr i64 %rhs, 40
    51  %sh2 = lshr i32 %b, 14
|
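All of the hits above exercise the same IR instruction: lshr is LLVM's logical shift right, which fills the vacated high bits with zeros (ashr, by contrast, copies the sign bit). A minimal standalone example of the byte-extraction idiom that the h-register and bitfield tests above are built around (illustrative names only, not taken from the tests):

    define i32 @extract_second_byte(i32 %x) {
      %shifted = lshr i32 %x, 8       ; logical shift right: high bits become 0
      %byte    = and i32 %shifted, 255
      ret i32 %byte
    }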