//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
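
// Illustrative worked example (not part of the original file): for the
// aggregate type {i32, {float, float}, i8} the non-aggregate leaves are
// numbered 0..3 in depth-first order, so the insertvalue/extractvalue index
// path [1, 1] (the second float) linearizes to 2:
//
//   unsigned Idx[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(Ty, Idx, Idx + 2, 0); // Linear == 2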

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
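
// Illustrative sketch (not part of the original file): assuming a DataLayout
// in which i32 and float are both 4 bytes and {i32, [2 x float]} has no
// padding, decomposing that type would produce:
//
//   ValueVTs = { i32, f32, f32 }
//   Offsets  = { 0, 4, 8 }   // only if a non-null Offsets vector was passed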

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}
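
// Illustrative example (not part of the original file): for a landingpad
// catch clause such as
//
//   catch i8* bitcast (i8** @_ZTIi to i8*)
//
// stripPointerCasts() peels off the bitcast and this returns @_ZTIi; for a
// catch-all clause (catch i8* null) it returns null instead.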

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
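
// Illustrative example (not part of the original file): an inline asm with
// constraint string "=*m,r" has an indirect memory output, so this returns
// true; a purely register-based "=r,r" would return false.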

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

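/// getFCmpCodeWithoutNaN - Given an ISD floating-point condition code,
/// return the equivalent code with the NaN-ordering distinction dropped
/// (e.g. both SETOLT and SETULT become SETLT), for contexts where NaNs can
/// be ignored, such as no-NaNs fast-math. (Comment added; not in the
/// original file.)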
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
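
// Illustrative example (not part of the original file): a bitcast between two
// pointer types is always a no-op here, as is one between two vector types
// (say <4 x i32> and <4 x float>) on a target where both are legal, since
// neither generates any code.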

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = 0;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
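
// Illustrative example (not part of the original file): given IR such as
//
//   %agg  = call { i8*, i32 } @f()
//   %val  = extractvalue { i8*, i32 } %agg, 0
//   %cast = bitcast i8* %val to i32*
//
// getNoopInput(%cast, ...) looks through the no-op pointer bitcast, then
// through the extractvalue (pushing index 0 onto ValLoc), and finally returns
// the call itself, since nothing else can be looked through.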

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 const TargetLoweringBase &TLI) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired)
    return false;

  return true;
}
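
// Illustrative example (not part of the original file): for
//
//   %r = tail call i64 @g()
//   %t = trunc i64 %r to i32
//   ret i32 %t
//
// both traces meet at the call; the trunc lowers BitsRequired to 32 while
// BitsProvided stays unbounded, so (on targets whose TLI allows truncation
// for tail calls) the slot check succeeds.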

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool
advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                      SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}
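
// Illustrative example (not part of the original file): continuing the
// firstRealType example above, for {[0 x i64], {{}, i32, {}}, i32} the first
// call yields Path == [1, 1] (the nested i32); nextRealType then skips the
// trailing empty struct and yields Path == [2] (the outer i32), and a further
// call returns false because the type is exhausted.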


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
        removeAttribute(Attribute::NoAlias) !=
      AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
        removeAttribute(Attribute::NoAlias))
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    using std::copy;
    SmallVector<unsigned, 4> TmpRetPath, TmpCallPath;
    copy(RetPath.rbegin(), RetPath.rend(), std::back_inserter(TmpRetPath));
    copy(CallPath.rbegin(), CallPath.rend(), std::back_inserter(TmpCallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
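
// Illustrative example (not part of the original file): the call below is in
// tail position, since only the return needs scheduling after it:
//
//   %r = tail call i32 @callee(i32 %x)
//   ret i32 %r
//
// whereas here it is not, because the interposing store has side effects that
// must execute between the call and the return:
//
//   %r = tail call i32 @callee(i32 %x)
//   store i32 %r, i32* @g
//   ret i32 %r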