Lines Matching defs:GEP

79 Value *InstCombiner::EmitGEPOffset(User *GEP) {
80 return llvm::EmitGEPOffset(Builder, DL, GEP);
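
EmitGEPOffset lowers a GEP's address computation into explicit integer arithmetic. A minimal sketch in LLVM IR, with invented names and assuming a typical DataLayout (the real helper also handles struct fields, vector GEPs, and overflow flags):

    ; %p = getelementptr inbounds i32, i32* %base, i64 %i
    ; the byte offset of %p relative to %base is, conceptually:
    %off = mul i64 %i, 4        ; 4 = store size of i32 under the assumed DataLayout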
929 /// is a sequence of GEP indices into the pointed type that will land us at the
990 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
991 // If this GEP has only 0 indices, it is the same pointer as
992 // Src. If Src is not a trivial GEP too, don't combine
994 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
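
A hypothetical IR shape for the case this guard rejects (struct type and value names invented): when the outer GEP has only zero indices it already computes the same address as Src, so folding would merely re-materialize Src's non-trivial indexing:

    ; %struct.S = type { i32, i32, i32 }
    %Src = getelementptr inbounds %struct.S, %struct.S* %P, i64 1, i32 2  ; non-trivial
    %GEP = getelementptr i32, i32* %Src, i64 0                            ; all-zero indices
    ; %GEP is the same address as %Src, so merging gains nothing.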
1334 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
1335 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
1338 return ReplaceInstUsesWith(GEP, V);
1340 Value *PtrOp = GEP.getOperand(0);
1346 DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());
1348 gep_type_iterator GTI = gep_type_begin(GEP);
1349 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
1379 return &GEP;
1387 // Don't fold a GEP into itself through a PHI node. This can only happen
1388 // through the back-edge of a loop. Folding a GEP into itself means that
1391 // actually achieving anything (the GEP still needs to be executed once per
1393 if (Op1 == &GEP)
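
The rejected shape, as a sketch (block and value names invented): a pointer advanced through a loop back-edge feeds its own GEP via a PHI, so the GEP still has to execute once per iteration and folding achieves nothing:

    loop:
      %p      = phi i8* [ %base, %entry ], [ %p.next, %loop ]
      %p.next = getelementptr inbounds i8, i8* %p, i64 1
      br i1 %done, label %exit, label %loop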
1403 // As for Op1 above, don't try to fold a GEP into itself.
1404 if (Op2 == &GEP)
1407 // Keep track of the type as we walk the GEP.
1420 // The first two arguments can vary for any GEP, the rest have to be
1427 // The GEP is different by more than one input. While this could be
1457 // BB so that it can be merged with the current GEP.
1458 GEP.getParent()->getInstList().insert(
1459 GEP.getParent()->getFirstInsertionPt(), NewGEP);
1461 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
1477 GEP.getParent()->getInstList().insert(
1478 GEP.getParent()->getFirstInsertionPt(), NewGEP);
1482 GEP.setOperand(0, NewGEP);
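
A sketch of the single-differing-operand case handled here (names invented; the surrounding code verifies that all other operands match): the PHI of GEPs becomes one GEP of a PHI over the differing index, inserted at the merge block:

    ; before
    then:
      %g1 = getelementptr inbounds i32, i32* %base, i64 %i
      br label %merge
    else:
      %g2 = getelementptr inbounds i32, i32* %base, i64 %j
      br label %merge
    merge:
      %p = phi i32* [ %g1, %then ], [ %g2, %else ]

    ; after
    merge:
      %idx = phi i64 [ %i, %then ], [ %j, %else ]
      %p   = getelementptr inbounds i32, i32* %base, i64 %idx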
1491 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1494 // Note that if our source is a gep chain itself then we wait for that
1504 // Find out whether the last index in the source GEP is a sequential idx.
1512 // Replace: gep (gep %P, long B), long A, ...
1513 // With: T = long A+B; gep %P, T, ...
1517 Value *GO1 = GEP.getOperand(1);
1537 // Update the GEP in place if possible.
1539 GEP.setOperand(0, Src->getOperand(0));
1540 GEP.setOperand(1, Sum);
1541 return &GEP;
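
A worked instance of the rewrite the comment at 1512-1513 describes, with illustrative types and names:

    ; before: gep (gep %P, long B), long A
    %Src = getelementptr [12 x i8], [12 x i8]* %P, i64 %B
    %G   = getelementptr [12 x i8], [12 x i8]* %Src, i64 %A
    ; after: T = A+B; gep %P, T
    %T = add i64 %A, %B
    %G = getelementptr [12 x i8], [12 x i8]* %P, i64 %T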
1545 Indices.append(GEP.op_begin()+2, GEP.op_end());
1546 } else if (isa<Constant>(*GEP.idx_begin()) &&
1547 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
1549 // Otherwise we can do the fold if the first index of the GEP is a zero
1551 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
1555 return GEP.isInBounds() && Src->isInBounds()
1558 GEP.getName())
1561 GEP.getName());
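
The other mergeable shape, sketched with an invented struct type: when the outer GEP's first index is zero, the two index lists simply concatenate:

    ; %struct.S = type { i32, [4 x i32] }
    ; before
    %Src = getelementptr inbounds %struct.S, %struct.S* %P, i64 0, i32 1
    %G   = getelementptr inbounds [4 x i32], [4 x i32]* %Src, i64 0, i64 %i
    ; after
    %G   = getelementptr inbounds %struct.S, %struct.S* %P, i64 0, i32 1, i64 %i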
1564 if (GEP.getNumIndices() == 1) {
1565 unsigned AS = GEP.getPointerAddressSpace();
1566 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
1568 Type *PtrTy = GEP.getPointerOperandType();
1576 V = GEP.getOperand(1);
1578 } else if (match(GEP.getOperand(1),
1582 } else if (match(GEP.getOperand(1),
1589 // Canonicalize (gep i8* X, -(ptrtoint Y))
1591 // The GEP pattern is emitted by the SCEV expander for certain kinds of
1597 return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
1599 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
1603 m_PtrToInt(m_Specific(GEP.getOperand(0)))))) {
1605 GEP.getType());
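
Both canonicalizations, sketched in IR (value names invented). The first turns a negated ptrtoint index into pointer subtraction; the second recognizes X + (Y - X) as just Y:

    ; (gep i8* X, -(ptrtoint Y))  -->  inttoptr (sub (ptrtoint X), (ptrtoint Y))
    %yi  = ptrtoint i8* %Y to i64
    %ni  = sub i64 0, %yi
    %g1  = getelementptr i8, i8* %X, i64 %ni
    ; becomes
    %xi  = ptrtoint i8* %X to i64
    %sub = sub i64 %xi, %yi
    %g1  = inttoptr i64 %sub to i8*

    ; (gep i8* X, (ptrtoint Y)-(ptrtoint X))  -->  %Y, cast to the GEP's type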
1611 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
1621 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
1624 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
1625 // into : GEP [10 x i8]* X, i32 0, ...
1627 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
1628 // into : GEP i8* X, ...
1635 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
1637 // -> GEP i8* X, ...
1638 SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
1640 StrippedPtrTy->getElementType(), StrippedPtr, Idx, GEP.getName());
1641 Res->setIsInBounds(GEP.isInBounds());
1642 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
1646 // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
1648 // %0 = GEP i8 addrspace(1)* X, ...
1650 return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
1655 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
1657 // -> GEP [10 x i8]* X, i32 0, ...
1661 // is a leading zero) we can fold the cast into this GEP.
1662 GEP.getAddressSpace()) {
1663 GEP.setOperand(0, StrippedPtr);
1664 GEP.setSourceElementType(XATy);
1665 return &GEP;
1668 // address space is different. Instead, create a new GEP followed by
1671 // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
1674 // %0 = GEP [10 x i8] addrspace(1)* X, ...
1676 SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
1677 Value *NewGEP = GEP.isInBounds()
1679 nullptr, StrippedPtr, Idx, GEP.getName())
1681 GEP.getName());
1682 return new AddrSpaceCastInst(NewGEP, GEP.getType());
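
The array-decay folds in this region, sketched; the [10 x i8]/[0 x i8] types come from the comments above, the rest is illustrative:

    ; before
    %c = bitcast [10 x i8]* %X to [0 x i8]*
    %g = getelementptr [0 x i8], [0 x i8]* %c, i32 0, i64 %i
    ; after (same address space: the GEP is rewritten in place)
    %g = getelementptr [10 x i8], [10 x i8]* %X, i32 0, i64 %i
    ; with an addrspacecast source, a new GEP is built and the cast moves after it:
    ;   %0 = getelementptr [10 x i8], [10 x i8] addrspace(1)* %X, i32 0, i64 %i
    ;   %g = addrspacecast i8 addrspace(1)* %0 to i8*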
1686 } else if (GEP.getNumOperands() == 2) {
1695 Type *IdxType = DL.getIntPtrType(GEP.getType());
1696 Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
1698 GEP.isInBounds()
1700 GEP.getName())
1701 : Builder->CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
1703 // V and GEP are both pointer types --> BitCast
1705 GEP.getType());
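
A sketch of this two-operand case (invented names): a GEP through a bitcast that merely strips an array type is rebuilt on the array with a leading zero index; any remaining type mismatch between the new GEP and the original becomes a bitcast:

    ; before
    %c = bitcast [10 x i32]* %X to i32*
    %g = getelementptr i32, i32* %c, i64 %i
    ; after
    %g = getelementptr [10 x i32], [10 x i32]* %X, i64 0, i64 %i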
1718 Value *Idx = GEP.getOperand(1);
1724 assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
1729 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1731 // GEP may not be "inbounds".
1733 GEP.isInBounds() && NSW
1735 GEP.getName())
1737 GEP.getName());
1741 GEP.getType());
1757 Value *Idx = GEP.getOperand(1);
1763 assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
1768 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1770 // GEP may not be "inbounds".
1772 Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
1775 Value *NewGEP = GEP.isInBounds() && NSW
1777 SrcElTy, StrippedPtr, Off, GEP.getName())
1779 GEP.getName());
1782 GEP.getType());
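
Both of the preceding blocks decompose a byte-offset index into NewIdx * Scale, where Scale is the element's store size; the NSW check is what allows an inbounds GEP to stay inbounds. A hedged sketch with invented names:

    ; before: i8 GEP whose offset is a multiple of sizeof(i32)
    %c   = bitcast [100 x i32]* %X to i8*
    %off = mul nsw i64 %i, 4
    %g   = getelementptr inbounds i8, i8* %c, i64 %off
    ; after
    %g   = getelementptr inbounds [100 x i32], [100 x i32]* %X, i64 0, i64 %i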
1790 // addrspacecast. To take advantage of the below bitcast + struct GEP, look
1795 // Z = gep Y, <...constant indices...>
1796 // Into an addrspacecasted GEP of the struct.
1803 /// Y = gep X, <...constant indices...>
1804 /// into a gep of the original struct. This is important for SROA and alias
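
The helper's goal, sketched with an invented struct layout: a constant-offset GEP on a bitcast pointer is re-expressed as field indexing into the original struct, which keeps SROA and alias analysis effective:

    ; %struct.S = type { i32, i32 }
    ; before
    %Y = bitcast %struct.S* %X to i8*
    %Z = getelementptr i8, i8* %Y, i64 4
    ; after
    %f = getelementptr %struct.S, %struct.S* %X, i64 0, i32 1
    %Z = bitcast i32* %f to i8*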
1809 unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
1812 GEP.accumulateConstantOffset(DL, Offset)) {
1814 // If this GEP instruction doesn't move the pointer, just replace the GEP
1820 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
1827 return &GEP;
1831 if (Operand->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
1832 return new AddrSpaceCastInst(Operand, GEP.getType());
1833 return new BitCastInst(Operand, GEP.getType());
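
The degenerate case handled first: if the accumulated constant offset is zero, the GEP moves nothing and collapses to a cast of the original operand (an addrspacecast when the address spaces differ). Sketch, types invented:

    %b = bitcast %struct.S* %X to [2 x i32]*
    %g = getelementptr [2 x i32], [2 x i32]* %b, i64 0, i64 0   ; offset 0
    ; becomes
    %g = bitcast %struct.S* %X to i32*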
1838 // GEP.
1842 GEP.isInBounds()
1846 if (NGEP->getType() == GEP.getType())
1847 return ReplaceInstUsesWith(GEP, NGEP);
1848 NGEP->takeName(&GEP);
1850 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
1851 return new AddrSpaceCastInst(NGEP, GEP.getType());
1852 return new BitCastInst(NGEP, GEP.getType());
2314 // load from a GEP. This reduces the size of the load. If a load is used
2330 Value *GEP = Builder->CreateInBoundsGEP(L->getType(),
2334 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
2340 // loads: extract (extract (load)) will be translated to extract (load (gep))
2341 // and if again single-use then via load (gep (gep)) to load (gep).
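
The load-narrowing the comment describes, sketched (aggregate type invented): instead of loading the whole aggregate and extracting one member, load just that member through an inbounds GEP:

    ; before
    %agg = load { i32, i64 }, { i32, i64 }* %p
    %v   = extractvalue { i32, i64 } %agg, 1
    ; after
    %g = getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i64 0, i32 1
    %v = load i64, i64* %g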