//===- AddrModeMatcher.cpp - Addressing mode matching facility --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the target addressing mode matcher class.
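// The matcher attempts to fold the instructions that compute an address
// (adds, scaled indices, GEPs, and pointer/integer casts) into the target's
// addressing mode for a load or store.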
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;
using namespace llvm::PatternMatch;

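// Print the addressing mode in a human-readable form.  For example, a mode
// with every field populated might print as (an illustrative value, not
// taken from a real run):
//   [GV:@g + 16 + Base:%base + 4*%idx]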
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

#ifndef NDEBUG
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif


/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
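  // For example (illustrative IR): if ScaleReg is "%t = add i32 %x, 4" and
  // Scale is 2, we can use %x as the scaled register instead and fold the
  // constant in by adding 4*2 = 8 to BaseOffs.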
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}


/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;
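    // A shift amount becomes a power-of-two multiplier; e.g. "X << 3" is
    // matched as X with a scale of 8.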

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We check whether it contains only constant offsets and
    // at most one variable offset.
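    // For example (illustrative IR, in the GEP syntax of this LLVM era):
    //   %p = getelementptr i32* %base, i32 %i
    // has base %base and one variable index %i, scaled by sizeof(i32) = 4.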
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
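    // Walk the indices: a struct field contributes a fixed byte offset from
    // the struct layout, while an array/pointer index contributes
    // Index*ElementSize, folded into ConstantOffset when the index is a
    // ConstantInt.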
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amounts are valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
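    // Snapshot the current state so we can roll back if folding turns out to
    // be possible but unprofitable.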
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}


/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  TargetLowering::AsmOperandInfoVector TargetConstraints =
    TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}


/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    User *U = *UI;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      unsigned opNo = UI.getOperandNo();
      if (opNo == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

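    // Otherwise U must itself be part of the address computation; recurse
    // over its uses (the recursive call reports failure if U is not foldable).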
    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}


/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; it is
  // just a reference to the stack/frame pointer, which is live for the whole
  // function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}


/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}