//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

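// Illustrative example (comment only, not part of the original source): given
// a Type *T for the aggregate
//   %T = type { i32, { float, double }, i8 }
// the non-aggregate leaves are numbered i32=0, float=1, double=2, i8=3, so
// the insertvalue/extractvalue index path {1, 1} (the nested double) would
// linearize as:
//   unsigned Idx[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(T, Idx, Idx + 2, 0); // Linear == 2
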
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

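// Illustrative example (comment only, not part of the original source): given
// a Type *T for
//   %T = type { i32, [2 x float] }
// and a typical DataLayout (4-byte i32 and float), the call
//   SmallVector<EVT, 4> VTs;
//   SmallVector<uint64_t, 4> Offs;
//   ComputeValueVTs(TLI, T, VTs, &Offs);
// yields VTs = {i32, f32, f32} and Offs = {0, 4, 8}.
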
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

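// Illustrative example (comment only, not part of the original source): a
// typeinfo operand typically reaches this function as a pointer cast of a
// global, e.g.
//   i8* bitcast (i8** @_ZTIi to i8*)
// stripPointerCasts() peels the bitcast, so @_ZTIi is returned; a null
// pointer (the catch-all case) yields nullptr.
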
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

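// Illustrative example (comment only, not part of the original source): given
//   %agg = call { i8*, i32 } @f()
//   %val = extractvalue { i8*, i32 } %agg, 1
// getNoopInput(%val, ValLoc, DataBits, TLI) looks through the extractvalue,
// appends 1 to ValLoc to record which slot of the aggregate matters, and
// returns %agg. A trunc the target allows for tail calls would similarly be
// looked through, lowering DataBits to the truncated width.
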
/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there; it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // will be a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

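// Illustrative example (comment only, not part of the original source;
// assumes the target allows the truncate for tail calls): in
//   %big = tail call i64 @g()
//   %small = trunc i64 %big to i32
//   ret i32 %small
// both traces meet at %big; the ret needs only 32 of the 64 bits provided,
// so the slot merely discards data and this function returns true when
// AllowDifferingSizes is set.
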
/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

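// Illustrative example (comment only, not part of the original source): with
// the type from firstRealType's documentation below,
// {[0 x i64], {{}, i32, {}}, i32}, and the iterator at Path = [1, 1] (the
// nested i32), one call advances to the empty struct at Path = [1, 2]; a
// further call pops back out and lands on the trailing i32 at Path = [2].
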
/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const SelectionDAG &DAG) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!DAG.getTarget().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret,
                                         *DAG.getTarget().getTargetLowering());
}

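// Illustrative example (comment only, not part of the original source): in
//   %r = tail call i32 @callee()
//   ret i32 %r
// the call is in tail position. Any instruction with side effects or memory
// reads scheduled between the call and the ret (a store, say) would make
// this function return false.
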
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes; it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction; the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    using std::copy;
    SmallVector<unsigned, 4> TmpRetPath, TmpCallPath;
    copy(RetPath.rbegin(), RetPath.rend(), std::back_inserter(TmpRetPath));
    copy(CallPath.rbegin(), CallPath.rend(), std::back_inserter(TmpCallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
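
// Illustrative example (comment only, not part of the original source): for
//   declare zeroext i8 @callee()
//   define zeroext i8 @caller() {
//     %r = tail call zeroext i8 @callee()
//     ret i8 %r
//   }
// the zeroext attributes match, so AllowDifferingSizes is cleared but the
// widths agree exactly and the return is eligible. If only one side carried
// zeroext, the attribute sets would differ and this function would return
// false.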