Lines Matching refs:c2
536 // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
554 // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
1303 // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
1333 // fold (add c1, c2) -> c1+c2
1349 // fold ((c1-A)+c2) -> (c1+c2)-A
1425 // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
1579 // fold (sub c1, c2) -> c1-c2
1598 // fold C2-(A+C1) -> (C2-C1)-A
1638 // fold (sub Sym+c1, Sym+c2) -> c1-c2
1664 // fold (mul c1, c2) -> c1*c2
1693 // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
1725 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
1755 // fold (sdiv c1, c2) -> c1/c2
1842 // fold (udiv c1, c2) -> c1/c2
1888 // fold (srem c1, c2) -> c1%c2
1930 // fold (urem c1, c2) -> c1%c2
2168 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2169 if (C2->getAPIntValue() == 2)
2178 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
2179 if (C2->getAPIntValue() == 2)
2274 // fold (and c1, c2) -> c1&c2
2748 // fold (or c1, c2) -> c1|c2
2776 // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
2777 // iff (c1 & c2) == 0.
2834 // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
2841 // We can only do this xform if we know that bits from X that are set in C2
2930 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
2931 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
3070 // fold (xor c1, c2) -> c1^c2
3138 // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
3247 // fold (shl c1, c2) -> c1<<c2
3288 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
3292 uint64_t c2 = N1C->getZExtValue();
3293 if (c1 + c2 >= OpSizeInBits)
3296 DAG.getConstant(c1 + c2, N1.getValueType()));
3299 // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
3311 uint64_t c2 = N1C->getZExtValue();
3314 if (c2 >= OpSizeInBits - InnerShiftSize) {
3315 if (c1 + c2 >= OpSizeInBits)
3320 DAG.getConstant(c1 + c2, N1.getValueType()));
3324 // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
3325 //                               (and (srl x, (sub c1, c2)), MASK)
3330 uint64_t c2 = N1C->getZExtValue();
3334 if (c2 > c1) {
3335 Mask = Mask.shl(c2-c1);
3337 DAG.getConstant(c2-c1, N1.getValueType()));
3339 Mask = Mask.lshr(c1-c2);
3341 DAG.getConstant(c1-c2, N1.getValueType()));
3375 // fold (sra c1, c2) -> c1>>c2
3404 // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
3471 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1+c2))
3520 // fold (srl c1, c2) -> c1 >>u c2
3537 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
3541 uint64_t c2 = N1C->getZExtValue();
3542 if (c1 + c2 >= OpSizeInBits)
3545 DAG.getConstant(c1 + c2, N1.getValueType()));
3548 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
3554 uint64_t c2 = N1C->getZExtValue();
3560 if (c1 + c2 >= InnerShiftSize)
3565 DAG.getConstant(c1 + c2, ShiftCountVT)));
3710 // fold (ctlz c1) -> c2
3720 // fold (cttz c1) -> c2
3730 // fold (ctpop c1) -> c2
5243 // fold (fadd c1, c2) -> c1+c2
5261 // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
5284 // fold (fsub c1, c2) -> c1-c2
5318 // fold (fmul c1, c2) -> c1*c2
5350 // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
5373 // fold (fdiv c1, c2) -> c1/c2
5400 // fold (frem c1, c2) -> fmod(c1,c2)
5760 // will convert it back to (X & C1) >> C2.
7538 // (and (sra X, size(X)-1), A) -> "and (srl X, C2), A" iff A is a