/external/llvm/test/Analysis/ScalarEvolution/
  undefined.ll
    17: %c = lshr i64 %x, 64
    29: %f = lshr i64 %x, -1
    (out-of-range shift amounts; see the note after this listing)
/external/llvm/test/CodeGen/AArch64/
  andandshift.ll
    11: %shr1 = lshr i32 %conv, 3
    23: %shr5 = lshr i32 %conv, 3
  arm64-dagcombiner-convergence.ll
    10: %tmp = lshr i128 %Params.coerce, 61
    13: %tmp1 = lshr i128 %SelLocs.coerce, 62
  arm64-regress-interphase-shift.ll
    17: %shr404 = lshr i64 %and398, 0
    27: %shr437 = lshr i64 %and431, %and432
/external/llvm/test/CodeGen/AMDGPU/
  partially-dead-super-register-immediate.ll
    18: %lshr = shl i64 %val, 24
    19: %and1 = and i64 %lshr, 2190433320969 ; (255 << 33) | 9
  udivrem64.ll
    163: %1 = lshr i64 %x, 33
    164: %2 = lshr i64 %y, 33
    180: %1 = lshr i64 %x, 33
    181: %2 = lshr i64 %y, 33
    199: %1 = lshr i64 %x, 40
    200: %2 = lshr i64 %y, 40
    218: %1 = lshr i64 %x, 40
    219: %2 = lshr i64 %y, 40
/external/llvm/test/CodeGen/ARM/
  fast-isel-shift-materialize.ll
    19: %tmp4 = lshr i32 %tmp3, 2
    20: %tmp10 = lshr i32 %tmp9, 2
  rev.ll
    6: %tmp1 = lshr i32 %X, 8
    22: %tmp1 = lshr i32 %X, 8
    49: %shr9 = lshr i16 %a, 8
    65: %shr23 = lshr i32 %i, 8
    78: %and2 = lshr i32 %x, 8
    94: %and = lshr i32 %a, 8
    106: %and = lshr i32 %a, 8
    121: %shr4 = lshr i32 %conv, 8
  sxt_rot.ll
    14: %B = lshr i32 %A, 8
    24: %B = lshr i32 %A, 8
  trunc_ldr.ll
    10: %tmp512 = lshr i32 %tmp4, 24
    19: %tmp512 = lshr i32 %tmp4, 24
/external/llvm/test/CodeGen/PowerPC/
  rlwinm2.ll
    15: %tmp3 = lshr i32 %X, %tmp2 ; <i32> [#uses=1]
    23: %tmp1 = lshr i32 %X, 27 ; <i32> [#uses=1]
/external/llvm/test/CodeGen/Thumb2/
  2009-11-13-STRDBug.ll
    10: %0 = lshr i64 %h1, 0 ; <i64> [#uses=1]
    12: %1 = lshr i64 %l1, 0 ; <i64> [#uses=1]
  thumb2-mulhi.ll
    9: %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1]
    20: %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1]
  thumb2-rev16.ll
    10: %r8 = lshr i32 %a, 8
    23: %r8 = lshr i32 %a, 8
  thumb2-ror.ll
    8: %r8 = lshr i32 %a, 22
    21: %shr = lshr i32 %v, %and
  thumb2-rsb.ll
    12: %tmp = lshr i32 %b, 6
    29: %r8 = lshr i32 %a, 8
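Note on the first entry: undefined.ll shifts an i64 by 64 and by -1 (which, read as an unsigned i64, is also far above 63). In LLVM IR, an lshr whose shift amount is equal to or larger than the bit width of the first operand has no defined result, which appears to be the corner case that test exercises. A minimal, self-contained sketch of the in-range versus out-of-range cases; the function names are made up for illustration and are not taken from the tree:

  ; in-range: 63 is the largest shift amount with a defined result for i64
  define i64 @shift_in_range(i64 %x) {
    %r = lshr i64 %x, 63
    ret i64 %r
  }

  ; out-of-range: the amount equals the bit width, so the result is undefined
  define i64 @shift_out_of_range(i64 %x) {
    %r = lshr i64 %x, 64
    ret i64 %r
  }

The second match in undefined.ll, lshr i64 %x, -1, falls into the same out-of-range case once the -1 is interpreted as an unsigned 64-bit shift amount.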