//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index of the requested
      // element and recurse into the element with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
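
// Worked example (illustrative only, not from the original source): for the
// aggregate type {i32, {i8, float}, i64}, the scalar leaves are numbered in
// depth-first order: i32 -> 0, i8 -> 1, float -> 2, i64 -> 3. So for a
// hypothetical StructType *STy of that type, the index list {1, 1}
// (selecting the float) linearizes to 2:
//
//   unsigned Idx[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(STy, Idx, Idx + 2, 0); // Linear == 2
//
// This matches the flattening order produced by ComputeValueVTs below.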

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
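
// Usage sketch (STy and the layout are assumptions for illustration): for a
// struct {i32, float} where both members occupy 4 bytes,
//
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, DL, STy, ValueVTs, &Offsets);
//   // ValueVTs == {MVT::i32, MVT::f32}, Offsets == {0, 4}
//
// i.e. one EVT per scalar leaf, each paired with its in-memory byte offset.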

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}
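
// Example (illustrative IR; @_ZTIi is the Itanium C++ ABI typeinfo for int):
// given a landingpad catch clause operand such as
//
//   catch i8* bitcast (i8** @_ZTIi to i8*)
//
// stripPointerCasts() looks through the bitcast, so ExtractTypeInfo returns
// the GlobalValue @_ZTIi, while a catch-all (null) operand yields nullptr.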

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operands access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
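
// Example (a sketch; the asm string is illustrative): inline asm IR such as
//
//   call void asm "incl $0", "=*m"(i32* %p)
//
// uses an indirect memory operand, so this function would report a memory
// constraint for the parsed ConstraintInfoVector of that call.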

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr if it computes no offset.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      // Look through truncates the target considers free for a tail call,
      // recording the narrowest width seen so far.
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee).
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee).
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in;
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
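
// Example (illustrative IR): in
//
//   %call = tail call i8* @f()
//   %cast = bitcast i8* %call to i32*
//   ret i32* %cast
//
// the pointer-to-pointer bitcast is a no-op by isNoopBitcast, so
// getNoopInput(%cast, ...) traces straight back to %call.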

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
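
// Example (sketch; assumes the target reports the truncate as free via
// allowTruncateForTailCall): for
//
//   %call = tail call i64 @f()
//   %trunc = trunc i64 %call to i32
//   ret i32 %trunc
//
// the ret traces back to %call with BitsRequired == 32 while the call
// provides 64 bits; the slot is acceptable when AllowDifferingSizes is true
// because the ret merely discards the high bits.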

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}
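
// Walk-through (of the example type in the comment above): for
// {[0 x i64], {{}, i32, {}}, i32}, firstRealType skips the zero-length array
// and the empty structs, ending with Path == [1, 1] for the nested i32;
// nextRealType then advances to Path == [2] for the trailing i32, and one
// more call returns false, finishing the iteration.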

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
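
// Example (illustrative IR): the call below is in tail call position; the
// block ends in a ret and nothing that needs scheduling sits between them.
//
//   define i32 @caller(i32 %x) {
//     %r = tail call i32 @callee(i32 %x)
//     ret i32 %r
//   }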

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as the calling convention goes; it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put
  // there, it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
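
// Example (illustrative IR): matching return attributes keep a call eligible.
//
//   declare zeroext i16 @callee()
//   define zeroext i16 @caller() {
//     %r = tail call zeroext i16 @callee()
//     ret i16 %r
//   }
//
// Both sides promise zeroext, so AllowDifferingSizes is simply tightened; if
// the returning function promised zeroext but the call site didn't, the
// function above would return false.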

bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
  if (!GV->hasLinkOnceODRLinkage())
    return false;

  if (GV->hasUnnamedAddr())
    return true;

  // If it is a non-constant variable, it needs to be uniqued across shared
  // objects.
  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
    if (!Var->isConstant())
      return false;
  }

  // An alias can point to a variable. We could try to resolve the alias to
  // decide, but for now just don't hide them.
  if (isa<GlobalAlias>(GV))
    return false;

  GlobalStatus GS;
  if (GlobalStatus::analyzeGlobal(GV, GS))
    return false;

  return !GS.IsCompared;
}
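
// Example (illustrative IR): a global such as
//
//   @g = linkonce_odr unnamed_addr constant i32 42
//
// can be omitted from the symbol table: linkonce_odr guarantees every copy
// is equivalent, unnamed_addr guarantees the address is not significant, and
// a constant never needs to be uniqued across shared objects.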

static void collectFuncletMembers(
    DenseMap<const MachineBasicBlock *, int> &FuncletMembership, int Funclet,
    const MachineBasicBlock *MBB) {
  // Add this MBB to our funclet.
  auto P = FuncletMembership.insert(std::make_pair(MBB, Funclet));

  // Don't revisit blocks.
  if (!P.second) {
    assert(P.first->second == Funclet && "MBB is part of two funclets!");
    return;
  }

  bool IsReturn = false;
  int NumTerminators = 0;
  for (const MachineInstr &MI : MBB->terminators()) {
    IsReturn |= MI.isReturn();
    ++NumTerminators;
  }
  assert((!IsReturn || NumTerminators == 1) &&
         "Expected only one terminator when a return is present!");

  // Returns are boundaries where funclet transfer can occur; don't follow
  // successors.
  if (IsReturn)
    return;

  for (const MachineBasicBlock *SMBB : MBB->successors())
    if (!SMBB->isEHPad())
      collectFuncletMembers(FuncletMembership, Funclet, SMBB);
}

DenseMap<const MachineBasicBlock *, int>
llvm::getFuncletMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> FuncletMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.getMMI().hasEHFunclets())
    return FuncletMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction()->getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> FuncletBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry()) {
      FuncletBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    // Guard against blocks with no terminator. CatchPads are not funclets for
    // SEH so do not consider CatchRet to transfer control to another funclet.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any funclet entry blocks.
  if (FuncletBlocks.empty())
    return FuncletMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectFuncletMembers(FuncletMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a funclet are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the funclets.
  for (const MachineBasicBlock *MBB : FuncletBlocks)
    collectFuncletMembers(FuncletMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really funclets, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectFuncletMembers(FuncletMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return FuncletMembership;
}
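
// Conceptual example (for funclet-based Windows EH; details vary by
// personality): in code like "try { f(); } catch (...) { g(); }", the blocks
// reachable from the function entry are colored with the entry block's
// number, the catch handler's blocks are colored with its funclet entry's
// number, and a catchret transfers control back to the parent funclet
// recorded in its second operand.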