//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
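
// Usage sketch (illustrative only, not compiled as part of this file): for
// the aggregate type { i32, { float, float }, i8 }, ComputeLinearIndex
// linearizes the leaves in order (i32 -> 0, float -> 1, float -> 2, i8 -> 3),
// so the insertvalue index path {1, 1} names linear index 2. ComputeValueVTs,
// given the same type, appends the EVTs {i32, f32, f32, i8} and, assuming a
// typical layout where the floats are 4-byte aligned, the offsets
// {0, 4, 8, 12}. STy and TLI are assumed to be in scope:
//
//   unsigned Path[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(STy, Path, Path + 2, 0);
//   assert(Linear == 2 && "second float starts at linear index 2");
//
//   SmallVector<EVT, 4> VTs;
//   SmallVector<uint64_t, 4> Offs;
//   ComputeValueVTs(TLI, STy, VTs, &Offs, 0);
//   // VTs.size() == 4 and Offs[2] == 8 under the layout assumed above.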

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses touch memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
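
// Usage sketch (illustrative only, not compiled as part of this file): given
// a call site CS known to call inline asm, a client can test for memory
// constraints like so (CS and TLI are assumed to be in scope):
//
//   if (const InlineAsm *IA = dyn_cast<InlineAsm>(CS.getCalledValue())) {
//     InlineAsm::ConstraintInfoVector CInfos = IA->ParseConstraints();
//     if (hasInlineAsmMemConstraint(CInfos, TLI))
//       ; // The asm may touch memory; treat it conservatively.
//   }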

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

/// getNoopInput - If V is a noop (i.e., lowers to no machine code), look
/// through it (and any transitive noop operands to it) and return its input
/// value. This is used to determine if a tail call can be formed.
///
static const Value *getNoopInput(const Value *V, const TargetLowering &TLI) {
  // If V is not an instruction, it can't be looked through.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0 || !I->hasOneUse() || I->getNumOperands() == 0) return V;

  Value *Op = I->getOperand(0);

  // Look through truly no-op truncates.
  if (isa<TruncInst>(I) &&
      TLI.isTruncateFree(I->getOperand(0)->getType(), I->getType()))
    return getNoopInput(I->getOperand(0), TLI);

  // Look through truly no-op bitcasts.
  if (isa<BitCastInst>(I)) {
    // No type change at all.
    if (Op->getType() == I->getType())
      return getNoopInput(Op, TLI);

    // Pointer to pointer cast.
    if (Op->getType()->isPointerTy() && I->getType()->isPointerTy())
      return getNoopInput(Op, TLI);

    if (isa<VectorType>(Op->getType()) && isa<VectorType>(I->getType()) &&
        TLI.isTypeLegal(EVT::getEVT(Op->getType())) &&
        TLI.isTypeLegal(EVT::getEVT(I->getType())))
      return getNoopInput(Op, TLI);
  }

  // Look through inttoptr.
  if (isa<IntToPtrInst>(I) && !isa<VectorType>(I->getType())) {
    // Make sure this isn't a truncating or extending cast. We could support
    // this eventually, but don't bother for now.
    if (TLI.getPointerTy().getSizeInBits() ==
        cast<IntegerType>(Op->getType())->getBitWidth())
      return getNoopInput(Op, TLI);
  }

  // Look through ptrtoint.
  if (isa<PtrToIntInst>(I) && !isa<VectorType>(I->getType())) {
    // Make sure this isn't a truncating or extending cast. We could support
    // this eventually, but don't bother for now.
    if (TLI.getPointerTy().getSizeInBits() ==
        cast<IntegerType>(I->getType())->getBitWidth())
      return getNoopInput(Op, TLI);
  }

  // Otherwise it's not something we can look through.
  return V;
}
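
// Example (illustrative IR, assuming 64-bit pointers so both casts below are
// free): getNoopInput looks through the whole chain and returns %p, since
// neither cast lowers to any machine code:
//
//   %q = bitcast i8* %p to i32*        ; pointer-to-pointer cast, a no-op
//   %i = ptrtoint i32* %q to i64       ; no-op when pointers are 64 bits
//   ret i64 %i
//
// Each looked-through instruction must also have exactly one use, as the
// checks above require.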

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
        removeAttribute(Attribute::NoAlias) !=
      AttrBuilder(CS.getAttributes(), AttributeSet::ReturnIndex).
        removeAttribute(Attribute::NoAlias))
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  // We handle two cases: multiple return values + scalars.
  Value *RetVal = Ret->getOperand(0);
  if (!isa<InsertValueInst>(RetVal) || !isa<StructType>(RetVal->getType()))
    // Handle scalars first.
    return getNoopInput(Ret->getOperand(0), TLI) == I;

  // If this is an aggregate return, look through the insert/extract values and
  // see if each is transparent.
  for (unsigned i = 0, e = cast<StructType>(RetVal->getType())->getNumElements();
       i != e; ++i) {
    const Value *InScalar = FindInsertedValue(RetVal, i);
    if (InScalar == 0) return false;
    InScalar = getNoopInput(InScalar, TLI);

    // If the scalar value being inserted is an extractvalue of the right index
    // from the call, then everything is good.
    const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(InScalar);
    if (EVI == 0 || EVI->getOperand(0) != I || EVI->getNumIndices() != 1 ||
        EVI->getIndices()[0] != i)
      return false;
  }

  return true;
}
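
// Example (illustrative IR): the call below is in tail call position. The
// bitcast between the call and the return is a no-op that getNoopInput looks
// through, and no instruction with a memory effect interposes before the ret:
//
//   declare i32* @callee(i8*)
//
//   define i8* @caller(i8* %arg) {
//   entry:
//     %r = call i32* @callee(i8* %arg)
//     %c = bitcast i32* %r to i8*
//     ret i8* %c
//   }
//
// By contrast, inserting e.g. a store between %r and the ret would make
// isInTailCallPosition return false.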