//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
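///
/// For example (illustrative): in the type {i32, [2 x i8], i32} the leaves
/// occupy linear indices 0, 1, 2 and 3, so the index list {2} (the trailing
/// i32) linearizes to 3.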
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out-of-bounds index");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the number of linear slots occupied by one array element.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // The index is inside the array: compute the linear index of the
      // requested element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // Base case: a non-aggregate type occupies a single linear slot.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
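///
/// For example (illustrative, assuming a typical DataLayout where i32 and
/// float are both 4 bytes): {i32, [2 x float]} yields ValueVTs = [i32, f32,
/// f32] and, if requested, Offsets = [0, 4, 8].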
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operands access memory as well.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. Relaxing the result for
/// targets or modes where NaNs can be ignored is handled separately by
/// getFCmpCodeWithoutNaN.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

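/// getFCmpCodeWithoutNaN - Given an ISD floating-point condition code, return
/// the equivalent code for the case where NaN operands can be ignored: each
/// ordered/unordered pair collapses to the plain comparison.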
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

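/// Return true if a bitcast from T1 to T2 would be a no-op at the machine
/// level: the types are identical, both are pointers, or both are legal
/// vector types.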
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
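///
/// For example (illustrative): given
///   %cast = bitcast i8* %src to i32*
/// tracing %cast yields %src, since a pointer-to-pointer bitcast generates no
/// code.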
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr instructions with all-zero indices.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through the call's arguments (skipping the callee) for one with
      // the "returned" attribute.
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Likewise for invoke (skipping the two destination BBs and the callee).
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // The value may come from either the aggregate or the scalar operand.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into already has the value we're
        // interested in; there is no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
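///
/// For example (illustrative IR, assuming the target considers the i32->i8
/// truncation free for tail calls):
///   %x = tail call i32 @foo()
///   %y = trunc i32 %x to i8
///   ret i8 %y
/// Here the ret merely discards the top 24 of the 32 bits the call provides.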
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there; it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
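///
/// For example (illustrative): iterating over {{i32, i32}, i64} visits
/// Path = [0, 0] (the first i32), then [0, 1] (the second i32), then [1]
/// (the i64), after which this function returns false.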
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was already a scalar (or an empty leaf).
  // We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
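///
/// For example (illustrative):
///   %ret = tail call i32 @callee()
///   ret i32 %ret
/// Here the call is in tail call position: no instruction that needs to
/// generate code sits between it and the ret.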
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

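/// Check whether the value returned by Ret can be produced directly by the
/// call I, given the caller F's return attributes: the attributes must be
/// compatible, and every returned slot must reach the ret through operations
/// that generate no code.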
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as the calling convention goes; it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction; the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
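/// Return true if GV can safely be omitted from the module's symbol table:
/// linkonce_odr guarantees every other linkage unit has an equivalent copy,
/// and nothing may observe this copy's distinct address or mutate it.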
bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
  if (!GV->hasLinkOnceODRLinkage())
    return false;

  if (GV->hasUnnamedAddr())
    return true;

  // If it is a non-constant variable, it needs to be uniqued across shared
  // objects.
  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
    if (!Var->isConstant())
      return false;
  }

  // An alias can point to a variable. We could try to resolve the alias to
  // decide, but for now just don't hide aliases.
  if (isa<GlobalAlias>(GV))
    return false;

  GlobalStatus GS;
  if (GlobalStatus::analyzeGlobal(GV, GS))
    return false;

  return !GS.IsCompared;
}
    643