1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the function verifier interface, that can be used for some 11 // sanity checking of input to the system. 12 // 13 // Note that this does not provide full `Java style' security and verifications, 14 // instead it just tries to ensure that code is well-formed. 15 // 16 // * Both of a binary operator's parameters are of the same type 17 // * Verify that the indices of mem access instructions match other operands 18 // * Verify that arithmetic and other things are only performed on first-class 19 // types. Verify that shifts & logicals only happen on integrals f.e. 20 // * All of the constants in a switch statement are of the correct type 21 // * The code is in valid SSA form 22 // * It should be illegal to put a label into any other type (like a structure) 23 // or to return one. [except constant arrays!] 24 // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad 25 // * PHI nodes must have an entry for each predecessor, with no extras. 26 // * PHI nodes must be the first thing in a basic block, all grouped together 27 // * PHI nodes must have at least one entry 28 // * All basic blocks should only end with terminator insts, not contain them 29 // * The entry node to a function must not have predecessors 30 // * All Instructions must be embedded into a basic block 31 // * Functions cannot take a void-typed parameter 32 // * Verify that a function's argument list agrees with it's declared type. 33 // * It is illegal to specify a name for a void value. 34 // * It is illegal to have a internal global value with no initializer 35 // * It is illegal to have a ret instruction that returns a value that does not 36 // agree with the function return value type. 37 // * Function call argument types match the function prototype 38 // * A landing pad is defined by a landingpad instruction, and can be jumped to 39 // only by the unwind edge of an invoke instruction. 40 // * A landingpad instruction must be the first non-PHI instruction in the 41 // block. 42 // * Landingpad instructions must be in a function with a personality function. 43 // * All other things that are tested by asserts spread about the code... 
44 // 45 //===----------------------------------------------------------------------===// 46 47 #include "llvm/IR/Verifier.h" 48 #include "llvm/ADT/MapVector.h" 49 #include "llvm/ADT/STLExtras.h" 50 #include "llvm/ADT/SetVector.h" 51 #include "llvm/ADT/SmallPtrSet.h" 52 #include "llvm/ADT/SmallVector.h" 53 #include "llvm/ADT/StringExtras.h" 54 #include "llvm/IR/CFG.h" 55 #include "llvm/IR/CallSite.h" 56 #include "llvm/IR/CallingConv.h" 57 #include "llvm/IR/ConstantRange.h" 58 #include "llvm/IR/Constants.h" 59 #include "llvm/IR/DataLayout.h" 60 #include "llvm/IR/DebugInfo.h" 61 #include "llvm/IR/DerivedTypes.h" 62 #include "llvm/IR/DiagnosticInfo.h" 63 #include "llvm/IR/Dominators.h" 64 #include "llvm/IR/InlineAsm.h" 65 #include "llvm/IR/InstIterator.h" 66 #include "llvm/IR/InstVisitor.h" 67 #include "llvm/IR/IntrinsicInst.h" 68 #include "llvm/IR/LLVMContext.h" 69 #include "llvm/IR/Metadata.h" 70 #include "llvm/IR/Module.h" 71 #include "llvm/IR/ModuleSlotTracker.h" 72 #include "llvm/IR/PassManager.h" 73 #include "llvm/IR/Statepoint.h" 74 #include "llvm/Pass.h" 75 #include "llvm/Support/CommandLine.h" 76 #include "llvm/Support/Debug.h" 77 #include "llvm/Support/ErrorHandling.h" 78 #include "llvm/Support/raw_ostream.h" 79 #include <algorithm> 80 #include <cstdarg> 81 using namespace llvm; 82 83 static cl::opt<bool> VerifyDebugInfo("verify-debug-info", cl::init(true)); 84 85 namespace { 86 struct VerifierSupport { 87 raw_ostream *OS; 88 const Module *M = nullptr; 89 Optional<ModuleSlotTracker> MST; 90 91 /// Track the brokenness of the module while recursively visiting. 92 bool Broken = false; 93 /// Broken debug info can be "recovered" from by stripping the debug info. 94 bool BrokenDebugInfo = false; 95 /// Whether to treat broken debug info as an error. 96 bool TreatBrokenDebugInfoAsError = true; 97 98 explicit VerifierSupport(raw_ostream *OS) : OS(OS) {} 99 100 private: 101 template <class NodeTy> void Write(const ilist_iterator<NodeTy> &I) { 102 Write(&*I); 103 } 104 105 void Write(const Module *M) { 106 if (!M) 107 return; 108 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n"; 109 } 110 111 void Write(const Value *V) { 112 if (!V) 113 return; 114 if (isa<Instruction>(V)) { 115 V->print(*OS, *MST); 116 *OS << '\n'; 117 } else { 118 V->printAsOperand(*OS, true, *MST); 119 *OS << '\n'; 120 } 121 } 122 void Write(ImmutableCallSite CS) { 123 Write(CS.getInstruction()); 124 } 125 126 void Write(const Metadata *MD) { 127 if (!MD) 128 return; 129 MD->print(*OS, *MST, M); 130 *OS << '\n'; 131 } 132 133 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) { 134 Write(MD.get()); 135 } 136 137 void Write(const NamedMDNode *NMD) { 138 if (!NMD) 139 return; 140 NMD->print(*OS, *MST); 141 *OS << '\n'; 142 } 143 144 void Write(Type *T) { 145 if (!T) 146 return; 147 *OS << ' ' << *T; 148 } 149 150 void Write(const Comdat *C) { 151 if (!C) 152 return; 153 *OS << *C; 154 } 155 156 template <typename T> void Write(ArrayRef<T> Vs) { 157 for (const T &V : Vs) 158 Write(V); 159 } 160 161 template <typename T1, typename... Ts> 162 void WriteTs(const T1 &V1, const Ts &... Vs) { 163 Write(V1); 164 WriteTs(Vs...); 165 } 166 167 template <typename... Ts> void WriteTs() {} 168 169 public: 170 /// \brief A check failed, so printout out the condition and the message. 171 /// 172 /// This provides a nice place to put a breakpoint if you want to see why 173 /// something is not correct. 
174 void CheckFailed(const Twine &Message) { 175 if (OS) 176 *OS << Message << '\n'; 177 Broken = true; 178 } 179 180 /// \brief A check failed (with values to print). 181 /// 182 /// This calls the Message-only version so that the above is easier to set a 183 /// breakpoint on. 184 template <typename T1, typename... Ts> 185 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) { 186 CheckFailed(Message); 187 if (OS) 188 WriteTs(V1, Vs...); 189 } 190 191 /// A debug info check failed. 192 void DebugInfoCheckFailed(const Twine &Message) { 193 if (OS) 194 *OS << Message << '\n'; 195 Broken |= TreatBrokenDebugInfoAsError; 196 BrokenDebugInfo = true; 197 } 198 199 /// A debug info check failed (with values to print). 200 template <typename T1, typename... Ts> 201 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, 202 const Ts &... Vs) { 203 DebugInfoCheckFailed(Message); 204 if (OS) 205 WriteTs(V1, Vs...); 206 } 207 }; 208 209 class Verifier : public InstVisitor<Verifier>, VerifierSupport { 210 friend class InstVisitor<Verifier>; 211 212 LLVMContext *Context; 213 DominatorTree DT; 214 215 /// \brief When verifying a basic block, keep track of all of the 216 /// instructions we have seen so far. 217 /// 218 /// This allows us to do efficient dominance checks for the case when an 219 /// instruction has an operand that is an instruction in the same block. 220 SmallPtrSet<Instruction *, 16> InstsInThisBlock; 221 222 /// \brief Keep track of the metadata nodes that have been checked already. 223 SmallPtrSet<const Metadata *, 32> MDNodes; 224 225 /// Track all DICompileUnits visited. 226 SmallPtrSet<const Metadata *, 2> CUVisited; 227 228 /// \brief The result type for a landingpad. 229 Type *LandingPadResultTy; 230 231 /// \brief Whether we've seen a call to @llvm.localescape in this function 232 /// already. 233 bool SawFrameEscape; 234 235 /// Stores the count of how many objects were passed to llvm.localescape for a 236 /// given function and the largest index passed to llvm.localrecover. 237 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo; 238 239 // Maps catchswitches and cleanuppads that unwind to siblings to the 240 // terminators that indicate the unwind, used to detect cycles therein. 241 MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo; 242 243 /// Cache of constants visited in search of ConstantExprs. 244 SmallPtrSet<const Constant *, 32> ConstantExprVisited; 245 246 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic. 247 SmallVector<const Function *, 4> DeoptimizeDeclarations; 248 249 // Verify that this GlobalValue is only used in this module. 250 // This map is used to avoid visiting uses twice. We can arrive at a user 251 // twice, if they have multiple operands. In particular for very large 252 // constant expressions, we can arrive at a particular user many times. 
253 SmallPtrSet<const Value *, 32> GlobalValueVisited; 254 255 void checkAtomicMemAccessSize(const Module *M, Type *Ty, 256 const Instruction *I); 257 258 void updateModule(const Module *NewM) { 259 if (M == NewM) 260 return; 261 MST.emplace(NewM); 262 M = NewM; 263 } 264 265 public: 266 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError) 267 : VerifierSupport(OS), Context(nullptr), LandingPadResultTy(nullptr), 268 SawFrameEscape(false) { 269 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError; 270 } 271 272 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; } 273 274 bool verify(const Function &F) { 275 updateModule(F.getParent()); 276 Context = &M->getContext(); 277 278 // First ensure the function is well-enough formed to compute dominance 279 // information, and directly compute a dominance tree. We don't rely on the 280 // pass manager to provide this as it isolates us from a potentially 281 // out-of-date dominator tree and makes it significantly more complex to run 282 // this code outside of a pass manager. 283 // FIXME: It's really gross that we have to cast away constness here. 284 if (!F.empty()) 285 DT.recalculate(const_cast<Function &>(F)); 286 287 for (const BasicBlock &BB : F) { 288 if (!BB.empty() && BB.back().isTerminator()) 289 continue; 290 291 if (OS) { 292 *OS << "Basic Block in function '" << F.getName() 293 << "' does not have terminator!\n"; 294 BB.printAsOperand(*OS, true, *MST); 295 *OS << "\n"; 296 } 297 return false; 298 } 299 300 Broken = false; 301 // FIXME: We strip const here because the inst visitor strips const. 302 visit(const_cast<Function &>(F)); 303 verifySiblingFuncletUnwinds(); 304 InstsInThisBlock.clear(); 305 LandingPadResultTy = nullptr; 306 SawFrameEscape = false; 307 SiblingFuncletInfo.clear(); 308 309 return !Broken; 310 } 311 312 bool verify(const Module &M) { 313 updateModule(&M); 314 Context = &M.getContext(); 315 Broken = false; 316 317 // Collect all declarations of the llvm.experimental.deoptimize intrinsic. 318 for (const Function &F : M) 319 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize) 320 DeoptimizeDeclarations.push_back(&F); 321 322 // Now that we've visited every function, verify that we never asked to 323 // recover a frame index that wasn't escaped. 324 verifyFrameRecoverIndices(); 325 for (const GlobalVariable &GV : M.globals()) 326 visitGlobalVariable(GV); 327 328 for (const GlobalAlias &GA : M.aliases()) 329 visitGlobalAlias(GA); 330 331 for (const NamedMDNode &NMD : M.named_metadata()) 332 visitNamedMDNode(NMD); 333 334 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable()) 335 visitComdat(SMEC.getValue()); 336 337 visitModuleFlags(M); 338 visitModuleIdents(M); 339 340 verifyCompileUnits(); 341 342 verifyDeoptimizeCallingConvs(); 343 344 return !Broken; 345 } 346 347 private: 348 // Verification methods... 
349 void visitGlobalValue(const GlobalValue &GV); 350 void visitGlobalVariable(const GlobalVariable &GV); 351 void visitGlobalAlias(const GlobalAlias &GA); 352 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C); 353 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited, 354 const GlobalAlias &A, const Constant &C); 355 void visitNamedMDNode(const NamedMDNode &NMD); 356 void visitMDNode(const MDNode &MD); 357 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F); 358 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F); 359 void visitComdat(const Comdat &C); 360 void visitModuleIdents(const Module &M); 361 void visitModuleFlags(const Module &M); 362 void visitModuleFlag(const MDNode *Op, 363 DenseMap<const MDString *, const MDNode *> &SeenIDs, 364 SmallVectorImpl<const MDNode *> &Requirements); 365 void visitFunction(const Function &F); 366 void visitBasicBlock(BasicBlock &BB); 367 void visitRangeMetadata(Instruction& I, MDNode* Range, Type* Ty); 368 void visitDereferenceableMetadata(Instruction& I, MDNode* MD); 369 370 template <class Ty> bool isValidMetadataArray(const MDTuple &N); 371 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N); 372 #include "llvm/IR/Metadata.def" 373 void visitDIScope(const DIScope &N); 374 void visitDIVariable(const DIVariable &N); 375 void visitDILexicalBlockBase(const DILexicalBlockBase &N); 376 void visitDITemplateParameter(const DITemplateParameter &N); 377 378 void visitTemplateParams(const MDNode &N, const Metadata &RawParams); 379 380 // InstVisitor overrides... 381 using InstVisitor<Verifier>::visit; 382 void visit(Instruction &I); 383 384 void visitTruncInst(TruncInst &I); 385 void visitZExtInst(ZExtInst &I); 386 void visitSExtInst(SExtInst &I); 387 void visitFPTruncInst(FPTruncInst &I); 388 void visitFPExtInst(FPExtInst &I); 389 void visitFPToUIInst(FPToUIInst &I); 390 void visitFPToSIInst(FPToSIInst &I); 391 void visitUIToFPInst(UIToFPInst &I); 392 void visitSIToFPInst(SIToFPInst &I); 393 void visitIntToPtrInst(IntToPtrInst &I); 394 void visitPtrToIntInst(PtrToIntInst &I); 395 void visitBitCastInst(BitCastInst &I); 396 void visitAddrSpaceCastInst(AddrSpaceCastInst &I); 397 void visitPHINode(PHINode &PN); 398 void visitBinaryOperator(BinaryOperator &B); 399 void visitICmpInst(ICmpInst &IC); 400 void visitFCmpInst(FCmpInst &FC); 401 void visitExtractElementInst(ExtractElementInst &EI); 402 void visitInsertElementInst(InsertElementInst &EI); 403 void visitShuffleVectorInst(ShuffleVectorInst &EI); 404 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); } 405 void visitCallInst(CallInst &CI); 406 void visitInvokeInst(InvokeInst &II); 407 void visitGetElementPtrInst(GetElementPtrInst &GEP); 408 void visitLoadInst(LoadInst &LI); 409 void visitStoreInst(StoreInst &SI); 410 void verifyDominatesUse(Instruction &I, unsigned i); 411 void visitInstruction(Instruction &I); 412 void visitTerminatorInst(TerminatorInst &I); 413 void visitBranchInst(BranchInst &BI); 414 void visitReturnInst(ReturnInst &RI); 415 void visitSwitchInst(SwitchInst &SI); 416 void visitIndirectBrInst(IndirectBrInst &BI); 417 void visitSelectInst(SelectInst &SI); 418 void visitUserOp1(Instruction &I); 419 void visitUserOp2(Instruction &I) { visitUserOp1(I); } 420 void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS); 421 template <class DbgIntrinsicTy> 422 void visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII); 423 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI); 424 void 
visitAtomicRMWInst(AtomicRMWInst &RMWI); 425 void visitFenceInst(FenceInst &FI); 426 void visitAllocaInst(AllocaInst &AI); 427 void visitExtractValueInst(ExtractValueInst &EVI); 428 void visitInsertValueInst(InsertValueInst &IVI); 429 void visitEHPadPredecessors(Instruction &I); 430 void visitLandingPadInst(LandingPadInst &LPI); 431 void visitCatchPadInst(CatchPadInst &CPI); 432 void visitCatchReturnInst(CatchReturnInst &CatchReturn); 433 void visitCleanupPadInst(CleanupPadInst &CPI); 434 void visitFuncletPadInst(FuncletPadInst &FPI); 435 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch); 436 void visitCleanupReturnInst(CleanupReturnInst &CRI); 437 438 void verifyCallSite(CallSite CS); 439 void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal); 440 void verifySwiftErrorValue(const Value *SwiftErrorVal); 441 void verifyMustTailCall(CallInst &CI); 442 bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT, 443 unsigned ArgNo, std::string &Suffix); 444 bool verifyAttributeCount(AttributeSet Attrs, unsigned Params); 445 void verifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction, 446 const Value *V); 447 void verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty, 448 bool isReturnValue, const Value *V); 449 void verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs, 450 const Value *V); 451 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs); 452 453 void visitConstantExprsRecursively(const Constant *EntryC); 454 void visitConstantExpr(const ConstantExpr *CE); 455 void verifyStatepoint(ImmutableCallSite CS); 456 void verifyFrameRecoverIndices(); 457 void verifySiblingFuncletUnwinds(); 458 459 void verifyBitPieceExpression(const DbgInfoIntrinsic &I); 460 461 /// Module-level debug info verification... 462 void verifyCompileUnits(); 463 464 /// Module-level verification that all @llvm.experimental.deoptimize 465 /// declarations share the same calling convention. 466 void verifyDeoptimizeCallingConvs(); 467 }; 468 } // End anonymous namespace 469 470 /// We know that cond should be true, if not print an error message. 471 #define Assert(C, ...) \ 472 do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (0) 473 474 /// We know that a debug info condition should be true, if not print 475 /// an error message. 476 #define AssertDI(C, ...) \ 477 do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (0) 478 479 480 void Verifier::visit(Instruction &I) { 481 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) 482 Assert(I.getOperand(i) != nullptr, "Operand is null", &I); 483 InstVisitor<Verifier>::visit(I); 484 } 485 486 // Helper to recursively iterate over indirect users. By 487 // returning false, the callback can ask to stop recursing 488 // further. 
489 static void forEachUser(const Value *User, 490 SmallPtrSet<const Value *, 32> &Visited, 491 llvm::function_ref<bool(const Value *)> Callback) { 492 if (!Visited.insert(User).second) 493 return; 494 for (const Value *TheNextUser : User->materialized_users()) 495 if (Callback(TheNextUser)) 496 forEachUser(TheNextUser, Visited, Callback); 497 } 498 499 void Verifier::visitGlobalValue(const GlobalValue &GV) { 500 Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(), 501 "Global is external, but doesn't have external or weak linkage!", &GV); 502 503 Assert(GV.getAlignment() <= Value::MaximumAlignment, 504 "huge alignment values are unsupported", &GV); 505 Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV), 506 "Only global variables can have appending linkage!", &GV); 507 508 if (GV.hasAppendingLinkage()) { 509 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV); 510 Assert(GVar && GVar->getValueType()->isArrayTy(), 511 "Only global arrays can have appending linkage!", GVar); 512 } 513 514 if (GV.isDeclarationForLinker()) 515 Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV); 516 517 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool { 518 if (const Instruction *I = dyn_cast<Instruction>(V)) { 519 if (!I->getParent() || !I->getParent()->getParent()) 520 CheckFailed("Global is referenced by parentless instruction!", &GV, 521 M, I); 522 else if (I->getParent()->getParent()->getParent() != M) 523 CheckFailed("Global is referenced in a different module!", &GV, 524 M, I, I->getParent()->getParent(), 525 I->getParent()->getParent()->getParent()); 526 return false; 527 } else if (const Function *F = dyn_cast<Function>(V)) { 528 if (F->getParent() != M) 529 CheckFailed("Global is used by function in a different module", &GV, 530 M, F, F->getParent()); 531 return false; 532 } 533 return true; 534 }); 535 } 536 537 void Verifier::visitGlobalVariable(const GlobalVariable &GV) { 538 if (GV.hasInitializer()) { 539 Assert(GV.getInitializer()->getType() == GV.getValueType(), 540 "Global variable initializer type does not match global " 541 "variable type!", 542 &GV); 543 544 // If the global has common linkage, it must have a zero initializer and 545 // cannot be constant. 546 if (GV.hasCommonLinkage()) { 547 Assert(GV.getInitializer()->isNullValue(), 548 "'common' global must have a zero initializer!", &GV); 549 Assert(!GV.isConstant(), "'common' global may not be marked constant!", 550 &GV); 551 Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV); 552 } 553 } 554 555 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" || 556 GV.getName() == "llvm.global_dtors")) { 557 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(), 558 "invalid linkage for intrinsic global variable", &GV); 559 // Don't worry about emitting an error for it not being an array, 560 // visitGlobalValue will complain on appending non-array. 561 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) { 562 StructType *STy = dyn_cast<StructType>(ATy->getElementType()); 563 PointerType *FuncPtrTy = 564 FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo(); 565 // FIXME: Reject the 2-field form in LLVM 4.0. 
566 Assert(STy && 567 (STy->getNumElements() == 2 || STy->getNumElements() == 3) && 568 STy->getTypeAtIndex(0u)->isIntegerTy(32) && 569 STy->getTypeAtIndex(1) == FuncPtrTy, 570 "wrong type for intrinsic global variable", &GV); 571 if (STy->getNumElements() == 3) { 572 Type *ETy = STy->getTypeAtIndex(2); 573 Assert(ETy->isPointerTy() && 574 cast<PointerType>(ETy)->getElementType()->isIntegerTy(8), 575 "wrong type for intrinsic global variable", &GV); 576 } 577 } 578 } 579 580 if (GV.hasName() && (GV.getName() == "llvm.used" || 581 GV.getName() == "llvm.compiler.used")) { 582 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(), 583 "invalid linkage for intrinsic global variable", &GV); 584 Type *GVType = GV.getValueType(); 585 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) { 586 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType()); 587 Assert(PTy, "wrong type for intrinsic global variable", &GV); 588 if (GV.hasInitializer()) { 589 const Constant *Init = GV.getInitializer(); 590 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init); 591 Assert(InitArray, "wrong initalizer for intrinsic global variable", 592 Init); 593 for (Value *Op : InitArray->operands()) { 594 Value *V = Op->stripPointerCastsNoFollowAliases(); 595 Assert(isa<GlobalVariable>(V) || isa<Function>(V) || 596 isa<GlobalAlias>(V), 597 "invalid llvm.used member", V); 598 Assert(V->hasName(), "members of llvm.used must be named", V); 599 } 600 } 601 } 602 } 603 604 Assert(!GV.hasDLLImportStorageClass() || 605 (GV.isDeclaration() && GV.hasExternalLinkage()) || 606 GV.hasAvailableExternallyLinkage(), 607 "Global is marked as dllimport, but not external", &GV); 608 609 if (!GV.hasInitializer()) { 610 visitGlobalValue(GV); 611 return; 612 } 613 614 // Walk any aggregate initializers looking for bitcasts between address spaces 615 visitConstantExprsRecursively(GV.getInitializer()); 616 617 visitGlobalValue(GV); 618 } 619 620 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) { 621 SmallPtrSet<const GlobalAlias*, 4> Visited; 622 Visited.insert(&GA); 623 visitAliaseeSubExpr(Visited, GA, C); 624 } 625 626 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited, 627 const GlobalAlias &GA, const Constant &C) { 628 if (const auto *GV = dyn_cast<GlobalValue>(&C)) { 629 Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition", 630 &GA); 631 632 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) { 633 Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA); 634 635 Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias", 636 &GA); 637 } else { 638 // Only continue verifying subexpressions of GlobalAliases. 639 // Do not recurse into global initializers. 
640 return; 641 } 642 } 643 644 if (const auto *CE = dyn_cast<ConstantExpr>(&C)) 645 visitConstantExprsRecursively(CE); 646 647 for (const Use &U : C.operands()) { 648 Value *V = &*U; 649 if (const auto *GA2 = dyn_cast<GlobalAlias>(V)) 650 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee()); 651 else if (const auto *C2 = dyn_cast<Constant>(V)) 652 visitAliaseeSubExpr(Visited, GA, *C2); 653 } 654 } 655 656 void Verifier::visitGlobalAlias(const GlobalAlias &GA) { 657 Assert(GlobalAlias::isValidLinkage(GA.getLinkage()), 658 "Alias should have private, internal, linkonce, weak, linkonce_odr, " 659 "weak_odr, or external linkage!", 660 &GA); 661 const Constant *Aliasee = GA.getAliasee(); 662 Assert(Aliasee, "Aliasee cannot be NULL!", &GA); 663 Assert(GA.getType() == Aliasee->getType(), 664 "Alias and aliasee types should match!", &GA); 665 666 Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee), 667 "Aliasee should be either GlobalValue or ConstantExpr", &GA); 668 669 visitAliaseeSubExpr(GA, *Aliasee); 670 671 visitGlobalValue(GA); 672 } 673 674 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) { 675 for (const MDNode *MD : NMD.operands()) { 676 if (NMD.getName() == "llvm.dbg.cu") { 677 AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD); 678 } 679 680 if (!MD) 681 continue; 682 683 visitMDNode(*MD); 684 } 685 } 686 687 void Verifier::visitMDNode(const MDNode &MD) { 688 // Only visit each node once. Metadata can be mutually recursive, so this 689 // avoids infinite recursion here, as well as being an optimization. 690 if (!MDNodes.insert(&MD).second) 691 return; 692 693 switch (MD.getMetadataID()) { 694 default: 695 llvm_unreachable("Invalid MDNode subclass"); 696 case Metadata::MDTupleKind: 697 break; 698 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \ 699 case Metadata::CLASS##Kind: \ 700 visit##CLASS(cast<CLASS>(MD)); \ 701 break; 702 #include "llvm/IR/Metadata.def" 703 } 704 705 for (const Metadata *Op : MD.operands()) { 706 if (!Op) 707 continue; 708 Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!", 709 &MD, Op); 710 if (auto *N = dyn_cast<MDNode>(Op)) { 711 visitMDNode(*N); 712 continue; 713 } 714 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) { 715 visitValueAsMetadata(*V, nullptr); 716 continue; 717 } 718 } 719 720 // Check these last, so we diagnose problems in operands first. 721 Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD); 722 Assert(MD.isResolved(), "All nodes should be resolved!", &MD); 723 } 724 725 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) { 726 Assert(MD.getValue(), "Expected valid value", &MD); 727 Assert(!MD.getValue()->getType()->isMetadataTy(), 728 "Unexpected metadata round-trip through values", &MD, MD.getValue()); 729 730 auto *L = dyn_cast<LocalAsMetadata>(&MD); 731 if (!L) 732 return; 733 734 Assert(F, "function-local metadata used outside a function", L); 735 736 // If this was an instruction, bb, or argument, verify that it is in the 737 // function that we expect. 
738 Function *ActualF = nullptr; 739 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) { 740 Assert(I->getParent(), "function-local metadata not in basic block", L, I); 741 ActualF = I->getParent()->getParent(); 742 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue())) 743 ActualF = BB->getParent(); 744 else if (Argument *A = dyn_cast<Argument>(L->getValue())) 745 ActualF = A->getParent(); 746 assert(ActualF && "Unimplemented function local metadata case!"); 747 748 Assert(ActualF == F, "function-local metadata used in wrong function", L); 749 } 750 751 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) { 752 Metadata *MD = MDV.getMetadata(); 753 if (auto *N = dyn_cast<MDNode>(MD)) { 754 visitMDNode(*N); 755 return; 756 } 757 758 // Only visit each node once. Metadata can be mutually recursive, so this 759 // avoids infinite recursion here, as well as being an optimization. 760 if (!MDNodes.insert(MD).second) 761 return; 762 763 if (auto *V = dyn_cast<ValueAsMetadata>(MD)) 764 visitValueAsMetadata(*V, F); 765 } 766 767 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); } 768 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); } 769 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); } 770 771 template <class Ty> 772 bool isValidMetadataArrayImpl(const MDTuple &N, bool AllowNull) { 773 for (Metadata *MD : N.operands()) { 774 if (MD) { 775 if (!isa<Ty>(MD)) 776 return false; 777 } else { 778 if (!AllowNull) 779 return false; 780 } 781 } 782 return true; 783 } 784 785 template <class Ty> 786 bool isValidMetadataArray(const MDTuple &N) { 787 return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ false); 788 } 789 790 template <class Ty> 791 bool isValidMetadataNullArray(const MDTuple &N) { 792 return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ true); 793 } 794 795 void Verifier::visitDILocation(const DILocation &N) { 796 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), 797 "location requires a valid scope", &N, N.getRawScope()); 798 if (auto *IA = N.getRawInlinedAt()) 799 AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA); 800 } 801 802 void Verifier::visitGenericDINode(const GenericDINode &N) { 803 AssertDI(N.getTag(), "invalid tag", &N); 804 } 805 806 void Verifier::visitDIScope(const DIScope &N) { 807 if (auto *F = N.getRawFile()) 808 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 809 } 810 811 void Verifier::visitDISubrange(const DISubrange &N) { 812 AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N); 813 AssertDI(N.getCount() >= -1, "invalid subrange count", &N); 814 } 815 816 void Verifier::visitDIEnumerator(const DIEnumerator &N) { 817 AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N); 818 } 819 820 void Verifier::visitDIBasicType(const DIBasicType &N) { 821 AssertDI(N.getTag() == dwarf::DW_TAG_base_type || 822 N.getTag() == dwarf::DW_TAG_unspecified_type, 823 "invalid tag", &N); 824 } 825 826 void Verifier::visitDIDerivedType(const DIDerivedType &N) { 827 // Common scope checks. 
828 visitDIScope(N); 829 830 AssertDI(N.getTag() == dwarf::DW_TAG_typedef || 831 N.getTag() == dwarf::DW_TAG_pointer_type || 832 N.getTag() == dwarf::DW_TAG_ptr_to_member_type || 833 N.getTag() == dwarf::DW_TAG_reference_type || 834 N.getTag() == dwarf::DW_TAG_rvalue_reference_type || 835 N.getTag() == dwarf::DW_TAG_const_type || 836 N.getTag() == dwarf::DW_TAG_volatile_type || 837 N.getTag() == dwarf::DW_TAG_restrict_type || 838 N.getTag() == dwarf::DW_TAG_member || 839 N.getTag() == dwarf::DW_TAG_inheritance || 840 N.getTag() == dwarf::DW_TAG_friend, 841 "invalid tag", &N); 842 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) { 843 AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N, 844 N.getRawExtraData()); 845 } 846 847 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope()); 848 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N, 849 N.getRawBaseType()); 850 } 851 852 static bool hasConflictingReferenceFlags(unsigned Flags) { 853 return (Flags & DINode::FlagLValueReference) && 854 (Flags & DINode::FlagRValueReference); 855 } 856 857 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) { 858 auto *Params = dyn_cast<MDTuple>(&RawParams); 859 AssertDI(Params, "invalid template params", &N, &RawParams); 860 for (Metadata *Op : Params->operands()) { 861 AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter", 862 &N, Params, Op); 863 } 864 } 865 866 void Verifier::visitDICompositeType(const DICompositeType &N) { 867 // Common scope checks. 868 visitDIScope(N); 869 870 AssertDI(N.getTag() == dwarf::DW_TAG_array_type || 871 N.getTag() == dwarf::DW_TAG_structure_type || 872 N.getTag() == dwarf::DW_TAG_union_type || 873 N.getTag() == dwarf::DW_TAG_enumeration_type || 874 N.getTag() == dwarf::DW_TAG_class_type, 875 "invalid tag", &N); 876 877 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope()); 878 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N, 879 N.getRawBaseType()); 880 881 AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()), 882 "invalid composite elements", &N, N.getRawElements()); 883 AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N, 884 N.getRawVTableHolder()); 885 AssertDI(!hasConflictingReferenceFlags(N.getFlags()), 886 "invalid reference flags", &N); 887 if (auto *Params = N.getRawTemplateParams()) 888 visitTemplateParams(N, *Params); 889 890 if (N.getTag() == dwarf::DW_TAG_class_type || 891 N.getTag() == dwarf::DW_TAG_union_type) { 892 AssertDI(N.getFile() && !N.getFile()->getFilename().empty(), 893 "class/union requires a filename", &N, N.getFile()); 894 } 895 } 896 897 void Verifier::visitDISubroutineType(const DISubroutineType &N) { 898 AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N); 899 if (auto *Types = N.getRawTypeArray()) { 900 AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types); 901 for (Metadata *Ty : N.getTypeArray()->operands()) { 902 AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty); 903 } 904 } 905 AssertDI(!hasConflictingReferenceFlags(N.getFlags()), 906 "invalid reference flags", &N); 907 } 908 909 void Verifier::visitDIFile(const DIFile &N) { 910 AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N); 911 } 912 913 void Verifier::visitDICompileUnit(const DICompileUnit &N) { 914 AssertDI(N.isDistinct(), "compile units must be distinct", &N); 915 AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N); 916 917 // Don't 
bother verifying the compilation directory or producer string 918 // as those could be empty. 919 AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N, 920 N.getRawFile()); 921 AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N, 922 N.getFile()); 923 924 AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind), 925 "invalid emission kind", &N); 926 927 if (auto *Array = N.getRawEnumTypes()) { 928 AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array); 929 for (Metadata *Op : N.getEnumTypes()->operands()) { 930 auto *Enum = dyn_cast_or_null<DICompositeType>(Op); 931 AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type, 932 "invalid enum type", &N, N.getEnumTypes(), Op); 933 } 934 } 935 if (auto *Array = N.getRawRetainedTypes()) { 936 AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array); 937 for (Metadata *Op : N.getRetainedTypes()->operands()) { 938 AssertDI(Op && (isa<DIType>(Op) || 939 (isa<DISubprogram>(Op) && 940 cast<DISubprogram>(Op)->isDefinition() == false)), 941 "invalid retained type", &N, Op); 942 } 943 } 944 if (auto *Array = N.getRawGlobalVariables()) { 945 AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array); 946 for (Metadata *Op : N.getGlobalVariables()->operands()) { 947 AssertDI(Op && isa<DIGlobalVariable>(Op), "invalid global variable ref", 948 &N, Op); 949 } 950 } 951 if (auto *Array = N.getRawImportedEntities()) { 952 AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array); 953 for (Metadata *Op : N.getImportedEntities()->operands()) { 954 AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref", 955 &N, Op); 956 } 957 } 958 if (auto *Array = N.getRawMacros()) { 959 AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array); 960 for (Metadata *Op : N.getMacros()->operands()) { 961 AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op); 962 } 963 } 964 CUVisited.insert(&N); 965 } 966 967 void Verifier::visitDISubprogram(const DISubprogram &N) { 968 AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N); 969 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope()); 970 if (auto *F = N.getRawFile()) 971 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 972 if (auto *T = N.getRawType()) 973 AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T); 974 AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N, 975 N.getRawContainingType()); 976 if (auto *Params = N.getRawTemplateParams()) 977 visitTemplateParams(N, *Params); 978 if (auto *S = N.getRawDeclaration()) 979 AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(), 980 "invalid subprogram declaration", &N, S); 981 if (auto *RawVars = N.getRawVariables()) { 982 auto *Vars = dyn_cast<MDTuple>(RawVars); 983 AssertDI(Vars, "invalid variable list", &N, RawVars); 984 for (Metadata *Op : Vars->operands()) { 985 AssertDI(Op && isa<DILocalVariable>(Op), "invalid local variable", &N, 986 Vars, Op); 987 } 988 } 989 AssertDI(!hasConflictingReferenceFlags(N.getFlags()), 990 "invalid reference flags", &N); 991 992 auto *Unit = N.getRawUnit(); 993 if (N.isDefinition()) { 994 // Subprogram definitions (not part of the type hierarchy). 
995 AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N); 996 AssertDI(Unit, "subprogram definitions must have a compile unit", &N); 997 AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit); 998 } else { 999 // Subprogram declarations (part of the type hierarchy). 1000 AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N); 1001 } 1002 } 1003 1004 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) { 1005 AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N); 1006 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), 1007 "invalid local scope", &N, N.getRawScope()); 1008 } 1009 1010 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) { 1011 visitDILexicalBlockBase(N); 1012 1013 AssertDI(N.getLine() || !N.getColumn(), 1014 "cannot have column info without line info", &N); 1015 } 1016 1017 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) { 1018 visitDILexicalBlockBase(N); 1019 } 1020 1021 void Verifier::visitDINamespace(const DINamespace &N) { 1022 AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N); 1023 if (auto *S = N.getRawScope()) 1024 AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S); 1025 } 1026 1027 void Verifier::visitDIMacro(const DIMacro &N) { 1028 AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define || 1029 N.getMacinfoType() == dwarf::DW_MACINFO_undef, 1030 "invalid macinfo type", &N); 1031 AssertDI(!N.getName().empty(), "anonymous macro", &N); 1032 if (!N.getValue().empty()) { 1033 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix"); 1034 } 1035 } 1036 1037 void Verifier::visitDIMacroFile(const DIMacroFile &N) { 1038 AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file, 1039 "invalid macinfo type", &N); 1040 if (auto *F = N.getRawFile()) 1041 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 1042 1043 if (auto *Array = N.getRawElements()) { 1044 AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array); 1045 for (Metadata *Op : N.getElements()->operands()) { 1046 AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op); 1047 } 1048 } 1049 } 1050 1051 void Verifier::visitDIModule(const DIModule &N) { 1052 AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N); 1053 AssertDI(!N.getName().empty(), "anonymous module", &N); 1054 } 1055 1056 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) { 1057 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType()); 1058 } 1059 1060 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) { 1061 visitDITemplateParameter(N); 1062 1063 AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag", 1064 &N); 1065 } 1066 1067 void Verifier::visitDITemplateValueParameter( 1068 const DITemplateValueParameter &N) { 1069 visitDITemplateParameter(N); 1070 1071 AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter || 1072 N.getTag() == dwarf::DW_TAG_GNU_template_template_param || 1073 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack, 1074 "invalid tag", &N); 1075 } 1076 1077 void Verifier::visitDIVariable(const DIVariable &N) { 1078 if (auto *S = N.getRawScope()) 1079 AssertDI(isa<DIScope>(S), "invalid scope", &N, S); 1080 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType()); 1081 if (auto *F = N.getRawFile()) 1082 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 1083 } 1084 1085 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) { 1086 // Checks 
common to all variables. 1087 visitDIVariable(N); 1088 1089 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N); 1090 AssertDI(!N.getName().empty(), "missing global variable name", &N); 1091 if (auto *V = N.getRawVariable()) { 1092 AssertDI(isa<ConstantAsMetadata>(V) && 1093 !isa<Function>(cast<ConstantAsMetadata>(V)->getValue()), 1094 "invalid global varaible ref", &N, V); 1095 visitConstantExprsRecursively(cast<ConstantAsMetadata>(V)->getValue()); 1096 } 1097 if (auto *Member = N.getRawStaticDataMemberDeclaration()) { 1098 AssertDI(isa<DIDerivedType>(Member), 1099 "invalid static data member declaration", &N, Member); 1100 } 1101 } 1102 1103 void Verifier::visitDILocalVariable(const DILocalVariable &N) { 1104 // Checks common to all variables. 1105 visitDIVariable(N); 1106 1107 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N); 1108 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()), 1109 "local variable requires a valid scope", &N, N.getRawScope()); 1110 } 1111 1112 void Verifier::visitDIExpression(const DIExpression &N) { 1113 AssertDI(N.isValid(), "invalid expression", &N); 1114 } 1115 1116 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) { 1117 AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N); 1118 if (auto *T = N.getRawType()) 1119 AssertDI(isType(T), "invalid type ref", &N, T); 1120 if (auto *F = N.getRawFile()) 1121 AssertDI(isa<DIFile>(F), "invalid file", &N, F); 1122 } 1123 1124 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) { 1125 AssertDI(N.getTag() == dwarf::DW_TAG_imported_module || 1126 N.getTag() == dwarf::DW_TAG_imported_declaration, 1127 "invalid tag", &N); 1128 if (auto *S = N.getRawScope()) 1129 AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S); 1130 AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N, 1131 N.getRawEntity()); 1132 } 1133 1134 void Verifier::visitComdat(const Comdat &C) { 1135 // The Module is invalid if the GlobalValue has private linkage. Entities 1136 // with private linkage don't have entries in the symbol table. 1137 if (const GlobalValue *GV = M->getNamedValue(C.getName())) 1138 Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage", 1139 GV); 1140 } 1141 1142 void Verifier::visitModuleIdents(const Module &M) { 1143 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident"); 1144 if (!Idents) 1145 return; 1146 1147 // llvm.ident takes a list of metadata entry. Each entry has only one string. 1148 // Scan each llvm.ident entry and make sure that this requirement is met. 1149 for (const MDNode *N : Idents->operands()) { 1150 Assert(N->getNumOperands() == 1, 1151 "incorrect number of operands in llvm.ident metadata", N); 1152 Assert(dyn_cast_or_null<MDString>(N->getOperand(0)), 1153 ("invalid value for llvm.ident metadata entry operand" 1154 "(the operand should be a string)"), 1155 N->getOperand(0)); 1156 } 1157 } 1158 1159 void Verifier::visitModuleFlags(const Module &M) { 1160 const NamedMDNode *Flags = M.getModuleFlagsMetadata(); 1161 if (!Flags) return; 1162 1163 // Scan each flag, and track the flags and requirements. 1164 DenseMap<const MDString*, const MDNode*> SeenIDs; 1165 SmallVector<const MDNode*, 16> Requirements; 1166 for (const MDNode *MDN : Flags->operands()) 1167 visitModuleFlag(MDN, SeenIDs, Requirements); 1168 1169 // Validate that the requirements in the module are valid. 
1170 for (const MDNode *Requirement : Requirements) { 1171 const MDString *Flag = cast<MDString>(Requirement->getOperand(0)); 1172 const Metadata *ReqValue = Requirement->getOperand(1); 1173 1174 const MDNode *Op = SeenIDs.lookup(Flag); 1175 if (!Op) { 1176 CheckFailed("invalid requirement on flag, flag is not present in module", 1177 Flag); 1178 continue; 1179 } 1180 1181 if (Op->getOperand(2) != ReqValue) { 1182 CheckFailed(("invalid requirement on flag, " 1183 "flag does not have the required value"), 1184 Flag); 1185 continue; 1186 } 1187 } 1188 } 1189 1190 void 1191 Verifier::visitModuleFlag(const MDNode *Op, 1192 DenseMap<const MDString *, const MDNode *> &SeenIDs, 1193 SmallVectorImpl<const MDNode *> &Requirements) { 1194 // Each module flag should have three arguments, the merge behavior (a 1195 // constant int), the flag ID (an MDString), and the value. 1196 Assert(Op->getNumOperands() == 3, 1197 "incorrect number of operands in module flag", Op); 1198 Module::ModFlagBehavior MFB; 1199 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) { 1200 Assert( 1201 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)), 1202 "invalid behavior operand in module flag (expected constant integer)", 1203 Op->getOperand(0)); 1204 Assert(false, 1205 "invalid behavior operand in module flag (unexpected constant)", 1206 Op->getOperand(0)); 1207 } 1208 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1)); 1209 Assert(ID, "invalid ID operand in module flag (expected metadata string)", 1210 Op->getOperand(1)); 1211 1212 // Sanity check the values for behaviors with additional requirements. 1213 switch (MFB) { 1214 case Module::Error: 1215 case Module::Warning: 1216 case Module::Override: 1217 // These behavior types accept any value. 1218 break; 1219 1220 case Module::Require: { 1221 // The value should itself be an MDNode with two operands, a flag ID (an 1222 // MDString), and a value. 1223 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2)); 1224 Assert(Value && Value->getNumOperands() == 2, 1225 "invalid value for 'require' module flag (expected metadata pair)", 1226 Op->getOperand(2)); 1227 Assert(isa<MDString>(Value->getOperand(0)), 1228 ("invalid value for 'require' module flag " 1229 "(first value operand should be a string)"), 1230 Value->getOperand(0)); 1231 1232 // Append it to the list of requirements, to check once all module flags are 1233 // scanned. 1234 Requirements.push_back(Value); 1235 break; 1236 } 1237 1238 case Module::Append: 1239 case Module::AppendUnique: { 1240 // These behavior types require the operand be an MDNode. 1241 Assert(isa<MDNode>(Op->getOperand(2)), 1242 "invalid value for 'append'-type module flag " 1243 "(expected a metadata node)", 1244 Op->getOperand(2)); 1245 break; 1246 } 1247 } 1248 1249 // Unless this is a "requires" flag, check the ID is unique. 
1250 if (MFB != Module::Require) { 1251 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second; 1252 Assert(Inserted, 1253 "module flag identifiers must be unique (or of 'require' type)", ID); 1254 } 1255 } 1256 1257 void Verifier::verifyAttributeTypes(AttributeSet Attrs, unsigned Idx, 1258 bool isFunction, const Value *V) { 1259 unsigned Slot = ~0U; 1260 for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I) 1261 if (Attrs.getSlotIndex(I) == Idx) { 1262 Slot = I; 1263 break; 1264 } 1265 1266 assert(Slot != ~0U && "Attribute set inconsistency!"); 1267 1268 for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot); 1269 I != E; ++I) { 1270 if (I->isStringAttribute()) 1271 continue; 1272 1273 if (I->getKindAsEnum() == Attribute::NoReturn || 1274 I->getKindAsEnum() == Attribute::NoUnwind || 1275 I->getKindAsEnum() == Attribute::NoInline || 1276 I->getKindAsEnum() == Attribute::AlwaysInline || 1277 I->getKindAsEnum() == Attribute::OptimizeForSize || 1278 I->getKindAsEnum() == Attribute::StackProtect || 1279 I->getKindAsEnum() == Attribute::StackProtectReq || 1280 I->getKindAsEnum() == Attribute::StackProtectStrong || 1281 I->getKindAsEnum() == Attribute::SafeStack || 1282 I->getKindAsEnum() == Attribute::NoRedZone || 1283 I->getKindAsEnum() == Attribute::NoImplicitFloat || 1284 I->getKindAsEnum() == Attribute::Naked || 1285 I->getKindAsEnum() == Attribute::InlineHint || 1286 I->getKindAsEnum() == Attribute::StackAlignment || 1287 I->getKindAsEnum() == Attribute::UWTable || 1288 I->getKindAsEnum() == Attribute::NonLazyBind || 1289 I->getKindAsEnum() == Attribute::ReturnsTwice || 1290 I->getKindAsEnum() == Attribute::SanitizeAddress || 1291 I->getKindAsEnum() == Attribute::SanitizeThread || 1292 I->getKindAsEnum() == Attribute::SanitizeMemory || 1293 I->getKindAsEnum() == Attribute::MinSize || 1294 I->getKindAsEnum() == Attribute::NoDuplicate || 1295 I->getKindAsEnum() == Attribute::Builtin || 1296 I->getKindAsEnum() == Attribute::NoBuiltin || 1297 I->getKindAsEnum() == Attribute::Cold || 1298 I->getKindAsEnum() == Attribute::OptimizeNone || 1299 I->getKindAsEnum() == Attribute::JumpTable || 1300 I->getKindAsEnum() == Attribute::Convergent || 1301 I->getKindAsEnum() == Attribute::ArgMemOnly || 1302 I->getKindAsEnum() == Attribute::NoRecurse || 1303 I->getKindAsEnum() == Attribute::InaccessibleMemOnly || 1304 I->getKindAsEnum() == Attribute::InaccessibleMemOrArgMemOnly || 1305 I->getKindAsEnum() == Attribute::AllocSize) { 1306 if (!isFunction) { 1307 CheckFailed("Attribute '" + I->getAsString() + 1308 "' only applies to functions!", V); 1309 return; 1310 } 1311 } else if (I->getKindAsEnum() == Attribute::ReadOnly || 1312 I->getKindAsEnum() == Attribute::WriteOnly || 1313 I->getKindAsEnum() == Attribute::ReadNone) { 1314 if (Idx == 0) { 1315 CheckFailed("Attribute '" + I->getAsString() + 1316 "' does not apply to function returns"); 1317 return; 1318 } 1319 } else if (isFunction) { 1320 CheckFailed("Attribute '" + I->getAsString() + 1321 "' does not apply to functions!", V); 1322 return; 1323 } 1324 } 1325 } 1326 1327 // VerifyParameterAttrs - Check the given attributes for an argument or return 1328 // value of the specified type. The value V is printed in error messages. 
1329 void Verifier::verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty, 1330 bool isReturnValue, const Value *V) { 1331 if (!Attrs.hasAttributes(Idx)) 1332 return; 1333 1334 verifyAttributeTypes(Attrs, Idx, false, V); 1335 1336 if (isReturnValue) 1337 Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) && 1338 !Attrs.hasAttribute(Idx, Attribute::Nest) && 1339 !Attrs.hasAttribute(Idx, Attribute::StructRet) && 1340 !Attrs.hasAttribute(Idx, Attribute::NoCapture) && 1341 !Attrs.hasAttribute(Idx, Attribute::Returned) && 1342 !Attrs.hasAttribute(Idx, Attribute::InAlloca) && 1343 !Attrs.hasAttribute(Idx, Attribute::SwiftSelf) && 1344 !Attrs.hasAttribute(Idx, Attribute::SwiftError), 1345 "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', " 1346 "'returned', 'swiftself', and 'swifterror' do not apply to return " 1347 "values!", 1348 V); 1349 1350 // Check for mutually incompatible attributes. Only inreg is compatible with 1351 // sret. 1352 unsigned AttrCount = 0; 1353 AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal); 1354 AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca); 1355 AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) || 1356 Attrs.hasAttribute(Idx, Attribute::InReg); 1357 AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest); 1358 Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', " 1359 "and 'sret' are incompatible!", 1360 V); 1361 1362 Assert(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) && 1363 Attrs.hasAttribute(Idx, Attribute::ReadOnly)), 1364 "Attributes " 1365 "'inalloca and readonly' are incompatible!", 1366 V); 1367 1368 Assert(!(Attrs.hasAttribute(Idx, Attribute::StructRet) && 1369 Attrs.hasAttribute(Idx, Attribute::Returned)), 1370 "Attributes " 1371 "'sret and returned' are incompatible!", 1372 V); 1373 1374 Assert(!(Attrs.hasAttribute(Idx, Attribute::ZExt) && 1375 Attrs.hasAttribute(Idx, Attribute::SExt)), 1376 "Attributes " 1377 "'zeroext and signext' are incompatible!", 1378 V); 1379 1380 Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) && 1381 Attrs.hasAttribute(Idx, Attribute::ReadOnly)), 1382 "Attributes " 1383 "'readnone and readonly' are incompatible!", 1384 V); 1385 1386 Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) && 1387 Attrs.hasAttribute(Idx, Attribute::WriteOnly)), 1388 "Attributes " 1389 "'readnone and writeonly' are incompatible!", 1390 V); 1391 1392 Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadOnly) && 1393 Attrs.hasAttribute(Idx, Attribute::WriteOnly)), 1394 "Attributes " 1395 "'readonly and writeonly' are incompatible!", 1396 V); 1397 1398 Assert(!(Attrs.hasAttribute(Idx, Attribute::NoInline) && 1399 Attrs.hasAttribute(Idx, Attribute::AlwaysInline)), 1400 "Attributes " 1401 "'noinline and alwaysinline' are incompatible!", 1402 V); 1403 1404 Assert(!AttrBuilder(Attrs, Idx) 1405 .overlaps(AttributeFuncs::typeIncompatible(Ty)), 1406 "Wrong types for attribute: " + 1407 AttributeSet::get(*Context, Idx, 1408 AttributeFuncs::typeIncompatible(Ty)).getAsString(Idx), 1409 V); 1410 1411 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) { 1412 SmallPtrSet<Type*, 4> Visited; 1413 if (!PTy->getElementType()->isSized(&Visited)) { 1414 Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) && 1415 !Attrs.hasAttribute(Idx, Attribute::InAlloca), 1416 "Attributes 'byval' and 'inalloca' do not support unsized types!", 1417 V); 1418 } 1419 if (!isa<PointerType>(PTy->getElementType())) 1420 Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError), 1421 "Attribute 'swifterror' only applies to parameters 
" 1422 "with pointer to pointer type!", 1423 V); 1424 } else { 1425 Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal), 1426 "Attribute 'byval' only applies to parameters with pointer type!", 1427 V); 1428 Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError), 1429 "Attribute 'swifterror' only applies to parameters " 1430 "with pointer type!", 1431 V); 1432 } 1433 } 1434 1435 // Check parameter attributes against a function type. 1436 // The value V is printed in error messages. 1437 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs, 1438 const Value *V) { 1439 if (Attrs.isEmpty()) 1440 return; 1441 1442 bool SawNest = false; 1443 bool SawReturned = false; 1444 bool SawSRet = false; 1445 bool SawSwiftSelf = false; 1446 bool SawSwiftError = false; 1447 1448 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1449 unsigned Idx = Attrs.getSlotIndex(i); 1450 1451 Type *Ty; 1452 if (Idx == 0) 1453 Ty = FT->getReturnType(); 1454 else if (Idx-1 < FT->getNumParams()) 1455 Ty = FT->getParamType(Idx-1); 1456 else 1457 break; // VarArgs attributes, verified elsewhere. 1458 1459 verifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V); 1460 1461 if (Idx == 0) 1462 continue; 1463 1464 if (Attrs.hasAttribute(Idx, Attribute::Nest)) { 1465 Assert(!SawNest, "More than one parameter has attribute nest!", V); 1466 SawNest = true; 1467 } 1468 1469 if (Attrs.hasAttribute(Idx, Attribute::Returned)) { 1470 Assert(!SawReturned, "More than one parameter has attribute returned!", 1471 V); 1472 Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()), 1473 "Incompatible " 1474 "argument and return types for 'returned' attribute", 1475 V); 1476 SawReturned = true; 1477 } 1478 1479 if (Attrs.hasAttribute(Idx, Attribute::StructRet)) { 1480 Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V); 1481 Assert(Idx == 1 || Idx == 2, 1482 "Attribute 'sret' is not on first or second parameter!", V); 1483 SawSRet = true; 1484 } 1485 1486 if (Attrs.hasAttribute(Idx, Attribute::SwiftSelf)) { 1487 Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V); 1488 SawSwiftSelf = true; 1489 } 1490 1491 if (Attrs.hasAttribute(Idx, Attribute::SwiftError)) { 1492 Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", 1493 V); 1494 SawSwiftError = true; 1495 } 1496 1497 if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) { 1498 Assert(Idx == FT->getNumParams(), "inalloca isn't on the last parameter!", 1499 V); 1500 } 1501 } 1502 1503 if (!Attrs.hasAttributes(AttributeSet::FunctionIndex)) 1504 return; 1505 1506 verifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V); 1507 1508 Assert( 1509 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1510 Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)), 1511 "Attributes 'readnone and readonly' are incompatible!", V); 1512 1513 Assert( 1514 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1515 Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::WriteOnly)), 1516 "Attributes 'readnone and writeonly' are incompatible!", V); 1517 1518 Assert( 1519 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly) && 1520 Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::WriteOnly)), 1521 "Attributes 'readonly and writeonly' are incompatible!", V); 1522 1523 Assert( 1524 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1525 Attrs.hasAttribute(AttributeSet::FunctionIndex, 1526 Attribute::InaccessibleMemOrArgMemOnly)), 1527 
"Attributes 'readnone and inaccessiblemem_or_argmemonly' are incompatible!", V); 1528 1529 Assert( 1530 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) && 1531 Attrs.hasAttribute(AttributeSet::FunctionIndex, 1532 Attribute::InaccessibleMemOnly)), 1533 "Attributes 'readnone and inaccessiblememonly' are incompatible!", V); 1534 1535 Assert( 1536 !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline) && 1537 Attrs.hasAttribute(AttributeSet::FunctionIndex, 1538 Attribute::AlwaysInline)), 1539 "Attributes 'noinline and alwaysinline' are incompatible!", V); 1540 1541 if (Attrs.hasAttribute(AttributeSet::FunctionIndex, 1542 Attribute::OptimizeNone)) { 1543 Assert(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline), 1544 "Attribute 'optnone' requires 'noinline'!", V); 1545 1546 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, 1547 Attribute::OptimizeForSize), 1548 "Attributes 'optsize and optnone' are incompatible!", V); 1549 1550 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize), 1551 "Attributes 'minsize and optnone' are incompatible!", V); 1552 } 1553 1554 if (Attrs.hasAttribute(AttributeSet::FunctionIndex, 1555 Attribute::JumpTable)) { 1556 const GlobalValue *GV = cast<GlobalValue>(V); 1557 Assert(GV->hasGlobalUnnamedAddr(), 1558 "Attribute 'jumptable' requires 'unnamed_addr'", V); 1559 } 1560 1561 if (Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::AllocSize)) { 1562 std::pair<unsigned, Optional<unsigned>> Args = 1563 Attrs.getAllocSizeArgs(AttributeSet::FunctionIndex); 1564 1565 auto CheckParam = [&](StringRef Name, unsigned ParamNo) { 1566 if (ParamNo >= FT->getNumParams()) { 1567 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V); 1568 return false; 1569 } 1570 1571 if (!FT->getParamType(ParamNo)->isIntegerTy()) { 1572 CheckFailed("'allocsize' " + Name + 1573 " argument must refer to an integer parameter", 1574 V); 1575 return false; 1576 } 1577 1578 return true; 1579 }; 1580 1581 if (!CheckParam("element size", Args.first)) 1582 return; 1583 1584 if (Args.second && !CheckParam("number of elements", *Args.second)) 1585 return; 1586 } 1587 } 1588 1589 void Verifier::verifyFunctionMetadata( 1590 ArrayRef<std::pair<unsigned, MDNode *>> MDs) { 1591 for (const auto &Pair : MDs) { 1592 if (Pair.first == LLVMContext::MD_prof) { 1593 MDNode *MD = Pair.second; 1594 Assert(MD->getNumOperands() == 2, 1595 "!prof annotations should have exactly 2 operands", MD); 1596 1597 // Check first operand. 1598 Assert(MD->getOperand(0) != nullptr, "first operand should not be null", 1599 MD); 1600 Assert(isa<MDString>(MD->getOperand(0)), 1601 "expected string with name of the !prof annotation", MD); 1602 MDString *MDS = cast<MDString>(MD->getOperand(0)); 1603 StringRef ProfName = MDS->getString(); 1604 Assert(ProfName.equals("function_entry_count"), 1605 "first operand should be 'function_entry_count'", MD); 1606 1607 // Check second operand. 
1608 Assert(MD->getOperand(1) != nullptr, "second operand should not be null", 1609 MD); 1610 Assert(isa<ConstantAsMetadata>(MD->getOperand(1)), 1611 "expected integer argument to function_entry_count", MD); 1612 } 1613 } 1614 } 1615 1616 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) { 1617 if (!ConstantExprVisited.insert(EntryC).second) 1618 return; 1619 1620 SmallVector<const Constant *, 16> Stack; 1621 Stack.push_back(EntryC); 1622 1623 while (!Stack.empty()) { 1624 const Constant *C = Stack.pop_back_val(); 1625 1626 // Check this constant expression. 1627 if (const auto *CE = dyn_cast<ConstantExpr>(C)) 1628 visitConstantExpr(CE); 1629 1630 if (const auto *GV = dyn_cast<GlobalValue>(C)) { 1631 // Global Values get visited separately, but we do need to make sure 1632 // that the global value is in the correct module 1633 Assert(GV->getParent() == M, "Referencing global in another module!", 1634 EntryC, M, GV, GV->getParent()); 1635 continue; 1636 } 1637 1638 // Visit all sub-expressions. 1639 for (const Use &U : C->operands()) { 1640 const auto *OpC = dyn_cast<Constant>(U); 1641 if (!OpC) 1642 continue; 1643 if (!ConstantExprVisited.insert(OpC).second) 1644 continue; 1645 Stack.push_back(OpC); 1646 } 1647 } 1648 } 1649 1650 void Verifier::visitConstantExpr(const ConstantExpr *CE) { 1651 if (CE->getOpcode() != Instruction::BitCast) 1652 return; 1653 1654 Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0), 1655 CE->getType()), 1656 "Invalid bitcast", CE); 1657 } 1658 1659 bool Verifier::verifyAttributeCount(AttributeSet Attrs, unsigned Params) { 1660 if (Attrs.getNumSlots() == 0) 1661 return true; 1662 1663 unsigned LastSlot = Attrs.getNumSlots() - 1; 1664 unsigned LastIndex = Attrs.getSlotIndex(LastSlot); 1665 if (LastIndex <= Params 1666 || (LastIndex == AttributeSet::FunctionIndex 1667 && (LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params))) 1668 return true; 1669 1670 return false; 1671 } 1672 1673 /// Verify that statepoint intrinsic is well formed. 
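///
/// As a rough, illustrative sketch only (hypothetical IR; the exact intrinsic
/// mangling is incidental), a minimal well-formed statepoint wrapping a void
/// callee with no call, transition, or deoptimization arguments looks like:
///   %tok = call token (i64, i32, void ()*, i32, i32, ...)
///       @llvm.experimental.gc.statepoint.p0f_isVoidf(
///           i64 0, i32 0, void ()* @foo, i32 0, i32 0, i32 0, i32 0)
/// The checks below walk exactly this layout: ID, number of patchable bytes,
/// callee, number of call arguments, flags, then the transition and
/// deoptimization argument groups, each preceded by its own length.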
1674 void Verifier::verifyStatepoint(ImmutableCallSite CS) { 1675 assert(CS.getCalledFunction() && 1676 CS.getCalledFunction()->getIntrinsicID() == 1677 Intrinsic::experimental_gc_statepoint); 1678 1679 const Instruction &CI = *CS.getInstruction(); 1680 1681 Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() && 1682 !CS.onlyAccessesArgMemory(), 1683 "gc.statepoint must read and write all memory to preserve " 1684 "reordering restrictions required by safepoint semantics", 1685 &CI); 1686 1687 const Value *IDV = CS.getArgument(0); 1688 Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer", 1689 &CI); 1690 1691 const Value *NumPatchBytesV = CS.getArgument(1); 1692 Assert(isa<ConstantInt>(NumPatchBytesV), 1693 "gc.statepoint number of patchable bytes must be a constant integer", 1694 &CI); 1695 const int64_t NumPatchBytes = 1696 cast<ConstantInt>(NumPatchBytesV)->getSExtValue(); 1697 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!"); 1698 Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be " 1699 "positive", 1700 &CI); 1701 1702 const Value *Target = CS.getArgument(2); 1703 auto *PT = dyn_cast<PointerType>(Target->getType()); 1704 Assert(PT && PT->getElementType()->isFunctionTy(), 1705 "gc.statepoint callee must be of function pointer type", &CI, Target); 1706 FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType()); 1707 1708 const Value *NumCallArgsV = CS.getArgument(3); 1709 Assert(isa<ConstantInt>(NumCallArgsV), 1710 "gc.statepoint number of arguments to underlying call " 1711 "must be constant integer", 1712 &CI); 1713 const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue(); 1714 Assert(NumCallArgs >= 0, 1715 "gc.statepoint number of arguments to underlying call " 1716 "must be positive", 1717 &CI); 1718 const int NumParams = (int)TargetFuncType->getNumParams(); 1719 if (TargetFuncType->isVarArg()) { 1720 Assert(NumCallArgs >= NumParams, 1721 "gc.statepoint mismatch in number of vararg call args", &CI); 1722 1723 // TODO: Remove this limitation 1724 Assert(TargetFuncType->getReturnType()->isVoidTy(), 1725 "gc.statepoint doesn't support wrapping non-void " 1726 "vararg functions yet", 1727 &CI); 1728 } else 1729 Assert(NumCallArgs == NumParams, 1730 "gc.statepoint mismatch in number of call args", &CI); 1731 1732 const Value *FlagsV = CS.getArgument(4); 1733 Assert(isa<ConstantInt>(FlagsV), 1734 "gc.statepoint flags must be constant integer", &CI); 1735 const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue(); 1736 Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0, 1737 "unknown flag used in gc.statepoint flags argument", &CI); 1738 1739 // Verify that the types of the call parameter arguments match 1740 // the type of the wrapped callee. 
1741 for (int i = 0; i < NumParams; i++) { 1742 Type *ParamType = TargetFuncType->getParamType(i); 1743 Type *ArgType = CS.getArgument(5 + i)->getType(); 1744 Assert(ArgType == ParamType, 1745 "gc.statepoint call argument does not match wrapped " 1746 "function type", 1747 &CI); 1748 } 1749 1750 const int EndCallArgsInx = 4 + NumCallArgs; 1751 1752 const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1); 1753 Assert(isa<ConstantInt>(NumTransitionArgsV), 1754 "gc.statepoint number of transition arguments " 1755 "must be constant integer", 1756 &CI); 1757 const int NumTransitionArgs = 1758 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue(); 1759 Assert(NumTransitionArgs >= 0, 1760 "gc.statepoint number of transition arguments must be positive", &CI); 1761 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs; 1762 1763 const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1); 1764 Assert(isa<ConstantInt>(NumDeoptArgsV), 1765 "gc.statepoint number of deoptimization arguments " 1766 "must be constant integer", 1767 &CI); 1768 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue(); 1769 Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments " 1770 "must be positive", 1771 &CI); 1772 1773 const int ExpectedNumArgs = 1774 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs; 1775 Assert(ExpectedNumArgs <= (int)CS.arg_size(), 1776 "gc.statepoint too few arguments according to length fields", &CI); 1777 1778 // Check that the only uses of this gc.statepoint are gc.result or 1779 // gc.relocate calls which are tied to this statepoint and thus part 1780 // of the same statepoint sequence. 1781 for (const User *U : CI.users()) { 1782 const CallInst *Call = dyn_cast<const CallInst>(U); 1783 Assert(Call, "illegal use of statepoint token", &CI, U); 1784 if (!Call) continue; 1785 Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call), 1786 "gc.result or gc.relocate are the only value uses " 1787 "of a gc.statepoint", 1788 &CI, U); 1789 if (isa<GCResultInst>(Call)) { 1790 Assert(Call->getArgOperand(0) == &CI, 1791 "gc.result connected to wrong gc.statepoint", &CI, Call); 1792 } else if (isa<GCRelocateInst>(Call)) { 1793 Assert(Call->getArgOperand(0) == &CI, 1794 "gc.relocate connected to wrong gc.statepoint", &CI, Call); 1795 } 1796 } 1797 1798 // Note: It is legal for a single derived pointer to be listed multiple 1799 // times. It's non-optimal, but it is legal. It can also happen after 1800 // insertion if we strip a bitcast away. 1801 // Note: It is really tempting to check that each base is relocated and 1802 // that a derived pointer is never reused as a base pointer. This turns 1803 // out to be problematic since optimizations run after safepoint insertion 1804 // can recognize equality properties that the insertion logic doesn't know 1805 // about.
See example statepoint.ll in the verifier subdirectory. 1806 } 1807 1808 void Verifier::verifyFrameRecoverIndices() { 1809 for (auto &Counts : FrameEscapeInfo) { 1810 Function *F = Counts.first; 1811 unsigned EscapedObjectCount = Counts.second.first; 1812 unsigned MaxRecoveredIndex = Counts.second.second; 1813 Assert(MaxRecoveredIndex <= EscapedObjectCount, 1814 "all indices passed to llvm.localrecover must be less than the " 1815 "number of arguments passed to llvm.localescape in the parent " 1816 "function", 1817 F); 1818 } 1819 } 1820 1821 static Instruction *getSuccPad(TerminatorInst *Terminator) { 1822 BasicBlock *UnwindDest; 1823 if (auto *II = dyn_cast<InvokeInst>(Terminator)) 1824 UnwindDest = II->getUnwindDest(); 1825 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator)) 1826 UnwindDest = CSI->getUnwindDest(); 1827 else 1828 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest(); 1829 return UnwindDest->getFirstNonPHI(); 1830 } 1831 1832 void Verifier::verifySiblingFuncletUnwinds() { 1833 SmallPtrSet<Instruction *, 8> Visited; 1834 SmallPtrSet<Instruction *, 8> Active; 1835 for (const auto &Pair : SiblingFuncletInfo) { 1836 Instruction *PredPad = Pair.first; 1837 if (Visited.count(PredPad)) 1838 continue; 1839 Active.insert(PredPad); 1840 TerminatorInst *Terminator = Pair.second; 1841 do { 1842 Instruction *SuccPad = getSuccPad(Terminator); 1843 if (Active.count(SuccPad)) { 1844 // Found a cycle; report the error. 1845 Instruction *CyclePad = SuccPad; 1846 SmallVector<Instruction *, 8> CycleNodes; 1847 do { 1848 CycleNodes.push_back(CyclePad); 1849 TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad]; 1850 if (CycleTerminator != CyclePad) 1851 CycleNodes.push_back(CycleTerminator); 1852 CyclePad = getSuccPad(CycleTerminator); 1853 } while (CyclePad != SuccPad); 1854 Assert(false, "EH pads can't handle each other's exceptions", 1855 ArrayRef<Instruction *>(CycleNodes)); 1856 } 1857 // Don't re-walk a node we've already checked. 1858 if (!Visited.insert(SuccPad).second) 1859 break; 1860 // Walk to this successor if it has a map entry. 1861 PredPad = SuccPad; 1862 auto TermI = SiblingFuncletInfo.find(PredPad); 1863 if (TermI == SiblingFuncletInfo.end()) 1864 break; 1865 Terminator = TermI->second; 1866 Active.insert(PredPad); 1867 } while (true); 1868 // Each node only has one successor, so we've walked all the active 1869 // nodes' successors. 1870 Active.clear(); 1871 } 1872 } 1873 1874 // visitFunction - Verify that a function is ok. 1875 // 1876 void Verifier::visitFunction(const Function &F) { 1877 visitGlobalValue(F); 1878 1879 // Check function arguments. 1880 FunctionType *FT = F.getFunctionType(); 1881 unsigned NumArgs = F.arg_size(); 1882 1883 Assert(Context == &F.getContext(), 1884 "Function context does not match Module context!", &F); 1885 1886 Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F); 1887 Assert(FT->getNumParams() == NumArgs, 1888 "# formal arguments must match # of arguments for function type!", &F, 1889 FT); 1890 Assert(F.getReturnType()->isFirstClassType() || 1891 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(), 1892 "Functions cannot return aggregate values!", &F); 1893 1894 Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(), 1895 "Invalid struct return type!", &F); 1896 1897 AttributeSet Attrs = F.getAttributes(); 1898 1899 Assert(verifyAttributeCount(Attrs, FT->getNumParams()), 1900 "Attribute after last parameter!", &F); 1901 1902 // Check function attributes.
1903 verifyFunctionAttrs(FT, Attrs, &F); 1904 1905 // On function declarations/definitions, we do not support the builtin 1906 // attribute. We do not check this in VerifyFunctionAttrs since that is 1907 // checking for attributes that can or cannot ever be on functions. 1908 Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::Builtin), 1909 "Attribute 'builtin' can only be applied to a callsite.", &F); 1910 1911 // Check that this function meets the restrictions on this calling convention. 1912 // Sometimes varargs is used for perfectly forwarding thunks, so some of these 1913 // restrictions can be lifted. 1914 switch (F.getCallingConv()) { 1915 default: 1916 case CallingConv::C: 1917 break; 1918 case CallingConv::Fast: 1919 case CallingConv::Cold: 1920 case CallingConv::Intel_OCL_BI: 1921 case CallingConv::PTX_Kernel: 1922 case CallingConv::PTX_Device: 1923 Assert(!F.isVarArg(), "Calling convention does not support varargs or " 1924 "perfect forwarding!", 1925 &F); 1926 break; 1927 } 1928 1929 bool isLLVMdotName = F.getName().size() >= 5 && 1930 F.getName().substr(0, 5) == "llvm."; 1931 1932 // Check that the argument values match the function type for this function... 1933 unsigned i = 0; 1934 for (const Argument &Arg : F.args()) { 1935 Assert(Arg.getType() == FT->getParamType(i), 1936 "Argument value does not match function argument type!", &Arg, 1937 FT->getParamType(i)); 1938 Assert(Arg.getType()->isFirstClassType(), 1939 "Function arguments must have first-class types!", &Arg); 1940 if (!isLLVMdotName) { 1941 Assert(!Arg.getType()->isMetadataTy(), 1942 "Function takes metadata but isn't an intrinsic", &Arg, &F); 1943 Assert(!Arg.getType()->isTokenTy(), 1944 "Function takes token but isn't an intrinsic", &Arg, &F); 1945 } 1946 1947 // Check that swifterror argument is only used by loads and stores. 1948 if (Attrs.hasAttribute(i+1, Attribute::SwiftError)) { 1949 verifySwiftErrorValue(&Arg); 1950 } 1951 ++i; 1952 } 1953 1954 if (!isLLVMdotName) 1955 Assert(!F.getReturnType()->isTokenTy(), 1956 "Function returns a token but isn't an intrinsic", &F); 1957 1958 // Get the function metadata attachments. 1959 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; 1960 F.getAllMetadata(MDs); 1961 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync"); 1962 verifyFunctionMetadata(MDs); 1963 1964 // Check validity of the personality function. 1965 if (F.hasPersonalityFn()) { 1966 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()); 1967 if (Per) 1968 Assert(Per->getParent() == F.getParent(), 1969 "Referencing personality function in another module!", 1970 &F, F.getParent(), Per, Per->getParent()); 1971 } 1972 1973 if (F.isMaterializable()) { 1974 // Function has a body somewhere we can't see. 1975 Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F, 1976 MDs.empty() ? nullptr : MDs.front().second); 1977 } else if (F.isDeclaration()) { 1978 for (const auto &I : MDs) { 1979 AssertDI(I.first != LLVMContext::MD_dbg, 1980 "function declaration may not have a !dbg attachment", &F); 1981 Assert(I.first != LLVMContext::MD_prof, 1982 "function declaration may not have a !prof attachment", &F); 1983 1984 // Verify the metadata itself. 1985 visitMDNode(*I.second); 1986 } 1987 Assert(!F.hasPersonalityFn(), 1988 "Function declaration shouldn't have a personality routine", &F); 1989 } else { 1990 // Verify that this function (which has a body) is not named "llvm.*". It 1991 // is not legal to define intrinsics.
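// For example (illustrative only): "declare void @llvm.donothing()" is a
// perfectly fine declaration, but giving that name a body, e.g.
//   define void @llvm.donothing() { ret void }
// is rejected by the check below.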
1992 Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F); 1993 1994 // Check the entry node. 1995 const BasicBlock *Entry = &F.getEntryBlock(); 1996 Assert(pred_empty(Entry), 1997 "Entry block to function must not have predecessors!", Entry); 1998 1999 // The address of the entry block cannot be taken, unless it is dead. 2000 if (Entry->hasAddressTaken()) { 2001 Assert(!BlockAddress::lookup(Entry)->isConstantUsed(), 2002 "blockaddress may not be used with the entry block!", Entry); 2003 } 2004 2005 unsigned NumDebugAttachments = 0, NumProfAttachments = 0; 2006 // Visit metadata attachments. 2007 for (const auto &I : MDs) { 2008 // Verify that the attachment is legal. 2009 switch (I.first) { 2010 default: 2011 break; 2012 case LLVMContext::MD_dbg: 2013 ++NumDebugAttachments; 2014 AssertDI(NumDebugAttachments == 1, 2015 "function must have a single !dbg attachment", &F, I.second); 2016 AssertDI(isa<DISubprogram>(I.second), 2017 "function !dbg attachment must be a subprogram", &F, I.second); 2018 break; 2019 case LLVMContext::MD_prof: 2020 ++NumProfAttachments; 2021 Assert(NumProfAttachments == 1, 2022 "function must have a single !prof attachment", &F, I.second); 2023 break; 2024 } 2025 2026 // Verify the metadata itself. 2027 visitMDNode(*I.second); 2028 } 2029 } 2030 2031 // If this function is actually an intrinsic, verify that it is only used in 2032 // direct call/invokes, never having its "address taken". 2033 // Only do this if the module is materialized, otherwise we don't have all the 2034 // uses. 2035 if (F.getIntrinsicID() && F.getParent()->isMaterialized()) { 2036 const User *U; 2037 if (F.hasAddressTaken(&U)) 2038 Assert(0, "Invalid user of intrinsic instruction!", U); 2039 } 2040 2041 Assert(!F.hasDLLImportStorageClass() || 2042 (F.isDeclaration() && F.hasExternalLinkage()) || 2043 F.hasAvailableExternallyLinkage(), 2044 "Function is marked as dllimport, but not external.", &F); 2045 2046 auto *N = F.getSubprogram(); 2047 if (!N) 2048 return; 2049 2050 visitDISubprogram(*N); 2051 2052 // Check that all !dbg attachments lead back to N (or, at least, another 2053 // subprogram that describes the same function). 2054 // 2055 // FIXME: Check this incrementally while visiting !dbg attachments. 2056 // FIXME: Only check when N is the canonical subprogram for F. 2057 SmallPtrSet<const MDNode *, 32> Seen; 2058 for (auto &BB : F) 2059 for (auto &I : BB) { 2060 // Be careful about using DILocation here since we might be dealing with 2061 // broken code (this is the Verifier after all). 2062 DILocation *DL = 2063 dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode()); 2064 if (!DL) 2065 continue; 2066 if (!Seen.insert(DL).second) 2067 continue; 2068 2069 DILocalScope *Scope = DL->getInlinedAtScope(); 2070 if (Scope && !Seen.insert(Scope).second) 2071 continue; 2072 2073 DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr; 2074 2075 // Scope and SP could be the same MDNode and we don't want to skip 2076 // validation in that case. 2077 if (SP && ((Scope != SP) && !Seen.insert(SP).second)) 2078 continue; 2079 2080 // FIXME: Once N is canonical, check "SP == &N". 2081 Assert(SP->describes(&F), 2082 "!dbg attachment points at wrong subprogram for function", N, &F, 2083 &I, DL, Scope, SP); 2084 } 2085 } 2086 2087 // visitBasicBlock - Verify that a basic block is well formed... 2088 // 2089 void Verifier::visitBasicBlock(BasicBlock &BB) { 2090 InstsInThisBlock.clear(); 2091 2092 // Ensure that basic blocks have terminators!
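// Illustrative sketch only (hypothetical IR): a block that simply falls off
// the end, e.g.
//   bb:
//     %x = add i32 %a, %b
// with no ret/br/switch/... as its final instruction, fails the check below.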
2093 Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB); 2094 2095 // Check constraints that this basic block imposes on all of the PHI nodes in 2096 // it. 2097 if (isa<PHINode>(BB.front())) { 2098 SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB)); 2099 SmallVector<std::pair<BasicBlock*, Value*>, 8> Values; 2100 std::sort(Preds.begin(), Preds.end()); 2101 PHINode *PN; 2102 for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) { 2103 // Ensure that PHI nodes have at least one entry! 2104 Assert(PN->getNumIncomingValues() != 0, 2105 "PHI nodes must have at least one entry. If the block is dead, " 2106 "the PHI should be removed!", 2107 PN); 2108 Assert(PN->getNumIncomingValues() == Preds.size(), 2109 "PHINode should have one entry for each predecessor of its " 2110 "parent basic block!", 2111 PN); 2112 2113 // Get and sort all incoming values in the PHI node... 2114 Values.clear(); 2115 Values.reserve(PN->getNumIncomingValues()); 2116 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 2117 Values.push_back(std::make_pair(PN->getIncomingBlock(i), 2118 PN->getIncomingValue(i))); 2119 std::sort(Values.begin(), Values.end()); 2120 2121 for (unsigned i = 0, e = Values.size(); i != e; ++i) { 2122 // Check to make sure that if there is more than one entry for a 2123 // particular basic block in this PHI node, that the incoming values are 2124 // all identical. 2125 // 2126 Assert(i == 0 || Values[i].first != Values[i - 1].first || 2127 Values[i].second == Values[i - 1].second, 2128 "PHI node has multiple entries for the same basic block with " 2129 "different incoming values!", 2130 PN, Values[i].first, Values[i].second, Values[i - 1].second); 2131 2132 // Check to make sure that the predecessors and PHI node entries are 2133 // matched up. 2134 Assert(Values[i].first == Preds[i], 2135 "PHI node entries do not match predecessors!", PN, 2136 Values[i].first, Preds[i]); 2137 } 2138 } 2139 } 2140 2141 // Check that all instructions have their parent pointers set up correctly. 2142 for (auto &I : BB) 2143 { 2144 Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!"); 2145 } 2146 } 2147 2148 void Verifier::visitTerminatorInst(TerminatorInst &I) { 2149 // Ensure that terminators only exist at the end of the basic block. 2150 Assert(&I == I.getParent()->getTerminator(), 2151 "Terminator found in the middle of a basic block!", I.getParent()); 2152 visitInstruction(I); 2153 } 2154 2155 void Verifier::visitBranchInst(BranchInst &BI) { 2156 if (BI.isConditional()) { 2157 Assert(BI.getCondition()->getType()->isIntegerTy(1), 2158 "Branch condition is not 'i1' type!", &BI, BI.getCondition()); 2159 } 2160 visitTerminatorInst(BI); 2161 } 2162 2163 void Verifier::visitReturnInst(ReturnInst &RI) { 2164 Function *F = RI.getParent()->getParent(); 2165 unsigned N = RI.getNumOperands(); 2166 if (F->getReturnType()->isVoidTy()) 2167 Assert(N == 0, 2168 "Found return instr that returns non-void in Function of void " 2169 "return type!", 2170 &RI, F->getReturnType()); 2171 else 2172 Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(), 2173 "Function return type does not match operand " 2174 "type of return inst!", 2175 &RI, F->getReturnType()); 2176 2177 // Check to make sure that the return value has necessary properties for 2178 // terminators... 
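// (Illustrative only, hypothetical IR: "ret i32 0" inside "define void @f()"
// trips the return-type checks above; "ret void" is the accepted form there.)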
2179 visitTerminatorInst(RI); 2180 } 2181 2182 void Verifier::visitSwitchInst(SwitchInst &SI) { 2183 // Check to make sure that all of the constants in the switch instruction 2184 // have the same type as the switched-on value. 2185 Type *SwitchTy = SI.getCondition()->getType(); 2186 SmallPtrSet<ConstantInt*, 32> Constants; 2187 for (auto &Case : SI.cases()) { 2188 Assert(Case.getCaseValue()->getType() == SwitchTy, 2189 "Switch constants must all be same type as switch value!", &SI); 2190 Assert(Constants.insert(Case.getCaseValue()).second, 2191 "Duplicate integer as switch case", &SI, Case.getCaseValue()); 2192 } 2193 2194 visitTerminatorInst(SI); 2195 } 2196 2197 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { 2198 Assert(BI.getAddress()->getType()->isPointerTy(), 2199 "Indirectbr operand must have pointer type!", &BI); 2200 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i) 2201 Assert(BI.getDestination(i)->getType()->isLabelTy(), 2202 "Indirectbr destinations must all have pointer type!", &BI); 2203 2204 visitTerminatorInst(BI); 2205 } 2206 2207 void Verifier::visitSelectInst(SelectInst &SI) { 2208 Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1), 2209 SI.getOperand(2)), 2210 "Invalid operands for select instruction!", &SI); 2211 2212 Assert(SI.getTrueValue()->getType() == SI.getType(), 2213 "Select values must have same type as select instruction!", &SI); 2214 visitInstruction(SI); 2215 } 2216 2217 /// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of 2218 /// a pass, if any exist, it's an error. 2219 /// 2220 void Verifier::visitUserOp1(Instruction &I) { 2221 Assert(0, "User-defined operators should not live outside of a pass!", &I); 2222 } 2223 2224 void Verifier::visitTruncInst(TruncInst &I) { 2225 // Get the source and destination types 2226 Type *SrcTy = I.getOperand(0)->getType(); 2227 Type *DestTy = I.getType(); 2228 2229 // Get the size of the types in bits, we'll need this later 2230 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2231 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2232 2233 Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I); 2234 Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I); 2235 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2236 "trunc source and destination must both be a vector or neither", &I); 2237 Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I); 2238 2239 visitInstruction(I); 2240 } 2241 2242 void Verifier::visitZExtInst(ZExtInst &I) { 2243 // Get the source and destination types 2244 Type *SrcTy = I.getOperand(0)->getType(); 2245 Type *DestTy = I.getType(); 2246 2247 // Get the size of the types in bits, we'll need this later 2248 Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I); 2249 Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I); 2250 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2251 "zext source and destination must both be a vector or neither", &I); 2252 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2253 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2254 2255 Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I); 2256 2257 visitInstruction(I); 2258 } 2259 2260 void Verifier::visitSExtInst(SExtInst &I) { 2261 // Get the source and destination types 2262 Type *SrcTy = I.getOperand(0)->getType(); 2263 Type *DestTy = I.getType(); 2264 2265 // Get the size of the types in bits, we'll need this later 2266 unsigned 
SrcBitSize = SrcTy->getScalarSizeInBits(); 2267 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2268 2269 Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I); 2270 Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I); 2271 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2272 "sext source and destination must both be a vector or neither", &I); 2273 Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I); 2274 2275 visitInstruction(I); 2276 } 2277 2278 void Verifier::visitFPTruncInst(FPTruncInst &I) { 2279 // Get the source and destination types 2280 Type *SrcTy = I.getOperand(0)->getType(); 2281 Type *DestTy = I.getType(); 2282 // Get the size of the types in bits, we'll need this later 2283 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2284 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2285 2286 Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I); 2287 Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I); 2288 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2289 "fptrunc source and destination must both be a vector or neither", &I); 2290 Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I); 2291 2292 visitInstruction(I); 2293 } 2294 2295 void Verifier::visitFPExtInst(FPExtInst &I) { 2296 // Get the source and destination types 2297 Type *SrcTy = I.getOperand(0)->getType(); 2298 Type *DestTy = I.getType(); 2299 2300 // Get the size of the types in bits, we'll need this later 2301 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2302 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 2303 2304 Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I); 2305 Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I); 2306 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), 2307 "fpext source and destination must both be a vector or neither", &I); 2308 Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I); 2309 2310 visitInstruction(I); 2311 } 2312 2313 void Verifier::visitUIToFPInst(UIToFPInst &I) { 2314 // Get the source and destination types 2315 Type *SrcTy = I.getOperand(0)->getType(); 2316 Type *DestTy = I.getType(); 2317 2318 bool SrcVec = SrcTy->isVectorTy(); 2319 bool DstVec = DestTy->isVectorTy(); 2320 2321 Assert(SrcVec == DstVec, 2322 "UIToFP source and dest must both be vector or scalar", &I); 2323 Assert(SrcTy->isIntOrIntVectorTy(), 2324 "UIToFP source must be integer or integer vector", &I); 2325 Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector", 2326 &I); 2327 2328 if (SrcVec && DstVec) 2329 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2330 cast<VectorType>(DestTy)->getNumElements(), 2331 "UIToFP source and dest vector length mismatch", &I); 2332 2333 visitInstruction(I); 2334 } 2335 2336 void Verifier::visitSIToFPInst(SIToFPInst &I) { 2337 // Get the source and destination types 2338 Type *SrcTy = I.getOperand(0)->getType(); 2339 Type *DestTy = I.getType(); 2340 2341 bool SrcVec = SrcTy->isVectorTy(); 2342 bool DstVec = DestTy->isVectorTy(); 2343 2344 Assert(SrcVec == DstVec, 2345 "SIToFP source and dest must both be vector or scalar", &I); 2346 Assert(SrcTy->isIntOrIntVectorTy(), 2347 "SIToFP source must be integer or integer vector", &I); 2348 Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector", 2349 &I); 2350 2351 if (SrcVec && DstVec) 2352 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2353 cast<VectorType>(DestTy)->getNumElements(), 2354 "SIToFP source 
and dest vector length mismatch", &I); 2355 2356 visitInstruction(I); 2357 } 2358 2359 void Verifier::visitFPToUIInst(FPToUIInst &I) { 2360 // Get the source and destination types 2361 Type *SrcTy = I.getOperand(0)->getType(); 2362 Type *DestTy = I.getType(); 2363 2364 bool SrcVec = SrcTy->isVectorTy(); 2365 bool DstVec = DestTy->isVectorTy(); 2366 2367 Assert(SrcVec == DstVec, 2368 "FPToUI source and dest must both be vector or scalar", &I); 2369 Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", 2370 &I); 2371 Assert(DestTy->isIntOrIntVectorTy(), 2372 "FPToUI result must be integer or integer vector", &I); 2373 2374 if (SrcVec && DstVec) 2375 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2376 cast<VectorType>(DestTy)->getNumElements(), 2377 "FPToUI source and dest vector length mismatch", &I); 2378 2379 visitInstruction(I); 2380 } 2381 2382 void Verifier::visitFPToSIInst(FPToSIInst &I) { 2383 // Get the source and destination types 2384 Type *SrcTy = I.getOperand(0)->getType(); 2385 Type *DestTy = I.getType(); 2386 2387 bool SrcVec = SrcTy->isVectorTy(); 2388 bool DstVec = DestTy->isVectorTy(); 2389 2390 Assert(SrcVec == DstVec, 2391 "FPToSI source and dest must both be vector or scalar", &I); 2392 Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", 2393 &I); 2394 Assert(DestTy->isIntOrIntVectorTy(), 2395 "FPToSI result must be integer or integer vector", &I); 2396 2397 if (SrcVec && DstVec) 2398 Assert(cast<VectorType>(SrcTy)->getNumElements() == 2399 cast<VectorType>(DestTy)->getNumElements(), 2400 "FPToSI source and dest vector length mismatch", &I); 2401 2402 visitInstruction(I); 2403 } 2404 2405 void Verifier::visitPtrToIntInst(PtrToIntInst &I) { 2406 // Get the source and destination types 2407 Type *SrcTy = I.getOperand(0)->getType(); 2408 Type *DestTy = I.getType(); 2409 2410 Assert(SrcTy->getScalarType()->isPointerTy(), 2411 "PtrToInt source must be pointer", &I); 2412 Assert(DestTy->getScalarType()->isIntegerTy(), 2413 "PtrToInt result must be integral", &I); 2414 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch", 2415 &I); 2416 2417 if (SrcTy->isVectorTy()) { 2418 VectorType *VSrc = dyn_cast<VectorType>(SrcTy); 2419 VectorType *VDest = dyn_cast<VectorType>(DestTy); 2420 Assert(VSrc->getNumElements() == VDest->getNumElements(), 2421 "PtrToInt Vector width mismatch", &I); 2422 } 2423 2424 visitInstruction(I); 2425 } 2426 2427 void Verifier::visitIntToPtrInst(IntToPtrInst &I) { 2428 // Get the source and destination types 2429 Type *SrcTy = I.getOperand(0)->getType(); 2430 Type *DestTy = I.getType(); 2431 2432 Assert(SrcTy->getScalarType()->isIntegerTy(), 2433 "IntToPtr source must be an integral", &I); 2434 Assert(DestTy->getScalarType()->isPointerTy(), 2435 "IntToPtr result must be a pointer", &I); 2436 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch", 2437 &I); 2438 if (SrcTy->isVectorTy()) { 2439 VectorType *VSrc = dyn_cast<VectorType>(SrcTy); 2440 VectorType *VDest = dyn_cast<VectorType>(DestTy); 2441 Assert(VSrc->getNumElements() == VDest->getNumElements(), 2442 "IntToPtr Vector width mismatch", &I); 2443 } 2444 visitInstruction(I); 2445 } 2446 2447 void Verifier::visitBitCastInst(BitCastInst &I) { 2448 Assert( 2449 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()), 2450 "Invalid bitcast", &I); 2451 visitInstruction(I); 2452 } 2453 2454 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) { 2455 Type *SrcTy = I.getOperand(0)->getType(); 
2456 Type *DestTy = I.getType(); 2457 2458 Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer", 2459 &I); 2460 Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer", 2461 &I); 2462 Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(), 2463 "AddrSpaceCast must be between different address spaces", &I); 2464 if (SrcTy->isVectorTy()) 2465 Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(), 2466 "AddrSpaceCast vector pointer number of elements mismatch", &I); 2467 visitInstruction(I); 2468 } 2469 2470 /// visitPHINode - Ensure that a PHI node is well formed. 2471 /// 2472 void Verifier::visitPHINode(PHINode &PN) { 2473 // Ensure that the PHI nodes are all grouped together at the top of the block. 2474 // This can be tested by checking whether the instruction before this is 2475 // either nonexistent (because this is begin()) or is a PHI node. If not, 2476 // then there is some other instruction before a PHI. 2477 Assert(&PN == &PN.getParent()->front() || 2478 isa<PHINode>(--BasicBlock::iterator(&PN)), 2479 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent()); 2480 2481 // Check that a PHI doesn't yield a Token. 2482 Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!"); 2483 2484 // Check that all of the values of the PHI node have the same type as the 2485 // result, and that the incoming blocks are really basic blocks. 2486 for (Value *IncValue : PN.incoming_values()) { 2487 Assert(PN.getType() == IncValue->getType(), 2488 "PHI node operands are not the same type as the result!", &PN); 2489 } 2490 2491 // All other PHI node constraints are checked in the visitBasicBlock method. 2492 2493 visitInstruction(PN); 2494 } 2495 2496 void Verifier::verifyCallSite(CallSite CS) { 2497 Instruction *I = CS.getInstruction(); 2498 2499 Assert(CS.getCalledValue()->getType()->isPointerTy(), 2500 "Called function must be a pointer!", I); 2501 PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType()); 2502 2503 Assert(FPTy->getElementType()->isFunctionTy(), 2504 "Called function is not pointer to function type!", I); 2505 2506 Assert(FPTy->getElementType() == CS.getFunctionType(), 2507 "Called function is not the same type as the call!", I); 2508 2509 FunctionType *FTy = CS.getFunctionType(); 2510 2511 // Verify that the correct number of arguments are being passed 2512 if (FTy->isVarArg()) 2513 Assert(CS.arg_size() >= FTy->getNumParams(), 2514 "Called function requires more parameters than were provided!", I); 2515 else 2516 Assert(CS.arg_size() == FTy->getNumParams(), 2517 "Incorrect number of arguments passed to called function!", I); 2518 2519 // Verify that all arguments to the call match the function type. 2520 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) 2521 Assert(CS.getArgument(i)->getType() == FTy->getParamType(i), 2522 "Call parameter type does not match function signature!", 2523 CS.getArgument(i), FTy->getParamType(i), I); 2524 2525 AttributeSet Attrs = CS.getAttributes(); 2526 2527 Assert(verifyAttributeCount(Attrs, CS.arg_size()), 2528 "Attribute after last parameter!", I); 2529 2530 // Verify call attributes. 2531 verifyFunctionAttrs(FTy, Attrs, I); 2532 2533 // Conservatively check the inalloca argument. 2534 // We have a bug if we can find that there is an underlying alloca without 2535 // inalloca. 
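// A minimal sketch of the expected pairing (hypothetical IR; @g is made up):
//   %m = alloca inalloca i32
//   call void @g(i32* inalloca %m)
// Passing a plain "%m = alloca i32" in the inalloca position is the mismatch
// the check below is meant to catch.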
2536 if (CS.hasInAllocaArgument()) { 2537 Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1); 2538 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets())) 2539 Assert(AI->isUsedWithInAlloca(), 2540 "inalloca argument for call has mismatched alloca", AI, I); 2541 } 2542 2543 // For each argument of the callsite, if it has the swifterror argument, 2544 // make sure the underlying alloca has swifterror as well. 2545 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) 2546 if (CS.paramHasAttr(i+1, Attribute::SwiftError)) { 2547 Value *SwiftErrorArg = CS.getArgument(i); 2548 auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets()); 2549 Assert(AI, "swifterror argument should come from alloca", AI, I); 2550 if (AI) 2551 Assert(AI->isSwiftError(), 2552 "swifterror argument for call has mismatched alloca", AI, I); 2553 } 2554 2555 if (FTy->isVarArg()) { 2556 // FIXME? is 'nest' even legal here? 2557 bool SawNest = false; 2558 bool SawReturned = false; 2559 2560 for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) { 2561 if (Attrs.hasAttribute(Idx, Attribute::Nest)) 2562 SawNest = true; 2563 if (Attrs.hasAttribute(Idx, Attribute::Returned)) 2564 SawReturned = true; 2565 } 2566 2567 // Check attributes on the varargs part. 2568 for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) { 2569 Type *Ty = CS.getArgument(Idx-1)->getType(); 2570 verifyParameterAttrs(Attrs, Idx, Ty, false, I); 2571 2572 if (Attrs.hasAttribute(Idx, Attribute::Nest)) { 2573 Assert(!SawNest, "More than one parameter has attribute nest!", I); 2574 SawNest = true; 2575 } 2576 2577 if (Attrs.hasAttribute(Idx, Attribute::Returned)) { 2578 Assert(!SawReturned, "More than one parameter has attribute returned!", 2579 I); 2580 Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()), 2581 "Incompatible argument and return types for 'returned' " 2582 "attribute", 2583 I); 2584 SawReturned = true; 2585 } 2586 2587 Assert(!Attrs.hasAttribute(Idx, Attribute::StructRet), 2588 "Attribute 'sret' cannot be used for vararg call arguments!", I); 2589 2590 if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) 2591 Assert(Idx == CS.arg_size(), "inalloca isn't on the last argument!", I); 2592 } 2593 } 2594 2595 // Verify that there's no metadata unless it's a direct call to an intrinsic. 2596 if (CS.getCalledFunction() == nullptr || 2597 !CS.getCalledFunction()->getName().startswith("llvm.")) { 2598 for (Type *ParamTy : FTy->params()) { 2599 Assert(!ParamTy->isMetadataTy(), 2600 "Function has metadata parameter but isn't an intrinsic", I); 2601 Assert(!ParamTy->isTokenTy(), 2602 "Function has token parameter but isn't an intrinsic", I); 2603 } 2604 } 2605 2606 // Verify that indirect calls don't return tokens. 2607 if (CS.getCalledFunction() == nullptr) 2608 Assert(!FTy->getReturnType()->isTokenTy(), 2609 "Return type cannot be token for indirect call!"); 2610 2611 if (Function *F = CS.getCalledFunction()) 2612 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) 2613 visitIntrinsicCallSite(ID, CS); 2614 2615 // Verify that a callsite has at most one "deopt", at most one "funclet" and 2616 // at most one "gc-transition" operand bundle. 
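// For example (illustrative only, hypothetical IR):
//   call void @f() [ "deopt"(i32 0), "funclet"(token %pad) ]
// is accepted, whereas repeating a tag, e.g.
//   call void @f() [ "deopt"(i32 0), "deopt"(i32 1) ]
// is rejected by the loop below.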
2617 bool FoundDeoptBundle = false, FoundFuncletBundle = false, 2618 FoundGCTransitionBundle = false; 2619 for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) { 2620 OperandBundleUse BU = CS.getOperandBundleAt(i); 2621 uint32_t Tag = BU.getTagID(); 2622 if (Tag == LLVMContext::OB_deopt) { 2623 Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I); 2624 FoundDeoptBundle = true; 2625 } else if (Tag == LLVMContext::OB_gc_transition) { 2626 Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles", 2627 I); 2628 FoundGCTransitionBundle = true; 2629 } else if (Tag == LLVMContext::OB_funclet) { 2630 Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I); 2631 FoundFuncletBundle = true; 2632 Assert(BU.Inputs.size() == 1, 2633 "Expected exactly one funclet bundle operand", I); 2634 Assert(isa<FuncletPadInst>(BU.Inputs.front()), 2635 "Funclet bundle operands should correspond to a FuncletPadInst", 2636 I); 2637 } 2638 } 2639 2640 // Verify that each inlinable callsite of a debug-info-bearing function in a 2641 // debug-info-bearing function has a debug location attached to it. Failure to 2642 // do so causes assertion failures when the inliner sets up inline scope info. 2643 if (I->getFunction()->getSubprogram() && CS.getCalledFunction() && 2644 CS.getCalledFunction()->getSubprogram()) 2645 Assert(I->getDebugLoc(), "inlinable function call in a function with debug " 2646 "info must have a !dbg location", 2647 I); 2648 2649 visitInstruction(*I); 2650 } 2651 2652 /// Two types are "congruent" if they are identical, or if they are both pointer 2653 /// types with different pointee types and the same address space. 2654 static bool isTypeCongruent(Type *L, Type *R) { 2655 if (L == R) 2656 return true; 2657 PointerType *PL = dyn_cast<PointerType>(L); 2658 PointerType *PR = dyn_cast<PointerType>(R); 2659 if (!PL || !PR) 2660 return false; 2661 return PL->getAddressSpace() == PR->getAddressSpace(); 2662 } 2663 2664 static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) { 2665 static const Attribute::AttrKind ABIAttrs[] = { 2666 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca, 2667 Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf, 2668 Attribute::SwiftError}; 2669 AttrBuilder Copy; 2670 for (auto AK : ABIAttrs) { 2671 if (Attrs.hasAttribute(I + 1, AK)) 2672 Copy.addAttribute(AK); 2673 } 2674 if (Attrs.hasAttribute(I + 1, Attribute::Alignment)) 2675 Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1)); 2676 return Copy; 2677 } 2678 2679 void Verifier::verifyMustTailCall(CallInst &CI) { 2680 Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI); 2681 2682 // - The caller and callee prototypes must match. Pointer types of 2683 // parameters or return types may differ in pointee type, but not 2684 // address space. 
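// A minimal sketch of what "congruent" permits (illustrative IR only; @callee
// and the bitcast are hypothetical):
//   declare i8* @callee(i8*)
//   define i8* @caller(i32* %p) {
//     %q = bitcast i32* %p to i8*
//     %r = musttail call i8* @callee(i8* %q)
//     ret i8* %r
//   }
// The parameter types (i32* vs. i8*) differ only in pointee type and share an
// address space, so the prototype checks below accept them.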
2685 Function *F = CI.getParent()->getParent(); 2686 FunctionType *CallerTy = F->getFunctionType(); 2687 FunctionType *CalleeTy = CI.getFunctionType(); 2688 Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(), 2689 "cannot guarantee tail call due to mismatched parameter counts", &CI); 2690 Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(), 2691 "cannot guarantee tail call due to mismatched varargs", &CI); 2692 Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()), 2693 "cannot guarantee tail call due to mismatched return types", &CI); 2694 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) { 2695 Assert( 2696 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)), 2697 "cannot guarantee tail call due to mismatched parameter types", &CI); 2698 } 2699 2700 // - The calling conventions of the caller and callee must match. 2701 Assert(F->getCallingConv() == CI.getCallingConv(), 2702 "cannot guarantee tail call due to mismatched calling conv", &CI); 2703 2704 // - All ABI-impacting function attributes, such as sret, byval, inreg, 2705 // returned, and inalloca, must match. 2706 AttributeSet CallerAttrs = F->getAttributes(); 2707 AttributeSet CalleeAttrs = CI.getAttributes(); 2708 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) { 2709 AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs); 2710 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs); 2711 Assert(CallerABIAttrs == CalleeABIAttrs, 2712 "cannot guarantee tail call due to mismatched ABI impacting " 2713 "function attributes", 2714 &CI, CI.getOperand(I)); 2715 } 2716 2717 // - The call must immediately precede a :ref:`ret <i_ret>` instruction, 2718 // or a pointer bitcast followed by a ret instruction. 2719 // - The ret instruction must return the (possibly bitcasted) value 2720 // produced by the call or void. 2721 Value *RetVal = &CI; 2722 Instruction *Next = CI.getNextNode(); 2723 2724 // Handle the optional bitcast. 2725 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) { 2726 Assert(BI->getOperand(0) == RetVal, 2727 "bitcast following musttail call must use the call", BI); 2728 RetVal = BI; 2729 Next = BI->getNextNode(); 2730 } 2731 2732 // Check the return. 2733 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next); 2734 Assert(Ret, "musttail call must precede a ret with an optional bitcast", 2735 &CI); 2736 Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal, 2737 "musttail call result must be returned", Ret); 2738 } 2739 2740 void Verifier::visitCallInst(CallInst &CI) { 2741 verifyCallSite(&CI); 2742 2743 if (CI.isMustTailCall()) 2744 verifyMustTailCall(CI); 2745 } 2746 2747 void Verifier::visitInvokeInst(InvokeInst &II) { 2748 verifyCallSite(&II); 2749 2750 // Verify that the first non-PHI instruction of the unwind destination is an 2751 // exception handling instruction. 2752 Assert( 2753 II.getUnwindDest()->isEHPad(), 2754 "The unwind destination does not have an exception handling instruction!", 2755 &II); 2756 2757 visitTerminatorInst(II); 2758 } 2759 2760 /// visitBinaryOperator - Check that both arguments to the binary operator are 2761 /// of the same type! 2762 /// 2763 void Verifier::visitBinaryOperator(BinaryOperator &B) { 2764 Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(), 2765 "Both operands to a binary operator are not of the same type!", &B); 2766 2767 switch (B.getOpcode()) { 2768 // Check that integer arithmetic operators are only used with 2769 // integral operands.
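// (Illustrative only: "add i32 %a, %b" and "add <4 x i32> %v, %w" are
// accepted here, while "add float %x, %y" is not; floating-point addition
// must be spelled "fadd".)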
2770 case Instruction::Add: 2771 case Instruction::Sub: 2772 case Instruction::Mul: 2773 case Instruction::SDiv: 2774 case Instruction::UDiv: 2775 case Instruction::SRem: 2776 case Instruction::URem: 2777 Assert(B.getType()->isIntOrIntVectorTy(), 2778 "Integer arithmetic operators only work with integral types!", &B); 2779 Assert(B.getType() == B.getOperand(0)->getType(), 2780 "Integer arithmetic operators must have same type " 2781 "for operands and result!", 2782 &B); 2783 break; 2784 // Check that floating-point arithmetic operators are only used with 2785 // floating-point operands. 2786 case Instruction::FAdd: 2787 case Instruction::FSub: 2788 case Instruction::FMul: 2789 case Instruction::FDiv: 2790 case Instruction::FRem: 2791 Assert(B.getType()->isFPOrFPVectorTy(), 2792 "Floating-point arithmetic operators only work with " 2793 "floating-point types!", 2794 &B); 2795 Assert(B.getType() == B.getOperand(0)->getType(), 2796 "Floating-point arithmetic operators must have same type " 2797 "for operands and result!", 2798 &B); 2799 break; 2800 // Check that logical operators are only used with integral operands. 2801 case Instruction::And: 2802 case Instruction::Or: 2803 case Instruction::Xor: 2804 Assert(B.getType()->isIntOrIntVectorTy(), 2805 "Logical operators only work with integral types!", &B); 2806 Assert(B.getType() == B.getOperand(0)->getType(), 2807 "Logical operators must have same type for operands and result!", 2808 &B); 2809 break; 2810 case Instruction::Shl: 2811 case Instruction::LShr: 2812 case Instruction::AShr: 2813 Assert(B.getType()->isIntOrIntVectorTy(), 2814 "Shifts only work with integral types!", &B); 2815 Assert(B.getType() == B.getOperand(0)->getType(), 2816 "Shift return type must be same as operands!", &B); 2817 break; 2818 default: 2819 llvm_unreachable("Unknown BinaryOperator opcode!"); 2820 } 2821 2822 visitInstruction(B); 2823 } 2824 2825 void Verifier::visitICmpInst(ICmpInst &IC) { 2826 // Check that the operands are the same type 2827 Type *Op0Ty = IC.getOperand(0)->getType(); 2828 Type *Op1Ty = IC.getOperand(1)->getType(); 2829 Assert(Op0Ty == Op1Ty, 2830 "Both operands to ICmp instruction are not of the same type!", &IC); 2831 // Check that the operands are the right type 2832 Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(), 2833 "Invalid operand types for ICmp instruction", &IC); 2834 // Check that the predicate is valid. 2835 Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE && 2836 IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE, 2837 "Invalid predicate in ICmp instruction!", &IC); 2838 2839 visitInstruction(IC); 2840 } 2841 2842 void Verifier::visitFCmpInst(FCmpInst &FC) { 2843 // Check that the operands are the same type 2844 Type *Op0Ty = FC.getOperand(0)->getType(); 2845 Type *Op1Ty = FC.getOperand(1)->getType(); 2846 Assert(Op0Ty == Op1Ty, 2847 "Both operands to FCmp instruction are not of the same type!", &FC); 2848 // Check that the operands are the right type 2849 Assert(Op0Ty->isFPOrFPVectorTy(), 2850 "Invalid operand types for FCmp instruction", &FC); 2851 // Check that the predicate is valid. 
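// (Illustrative only: "fcmp olt double %x, %y" carries a valid FP predicate;
// a predicate value outside the fcmp range, e.g. arriving via malformed
// bitcode, trips the check below.)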
2852 Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE && 2853 FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE, 2854 "Invalid predicate in FCmp instruction!", &FC); 2855 2856 visitInstruction(FC); 2857 } 2858 2859 void Verifier::visitExtractElementInst(ExtractElementInst &EI) { 2860 Assert( 2861 ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)), 2862 "Invalid extractelement operands!", &EI); 2863 visitInstruction(EI); 2864 } 2865 2866 void Verifier::visitInsertElementInst(InsertElementInst &IE) { 2867 Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1), 2868 IE.getOperand(2)), 2869 "Invalid insertelement operands!", &IE); 2870 visitInstruction(IE); 2871 } 2872 2873 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) { 2874 Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1), 2875 SV.getOperand(2)), 2876 "Invalid shufflevector operands!", &SV); 2877 visitInstruction(SV); 2878 } 2879 2880 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) { 2881 Type *TargetTy = GEP.getPointerOperandType()->getScalarType(); 2882 2883 Assert(isa<PointerType>(TargetTy), 2884 "GEP base pointer is not a vector or a vector of pointers", &GEP); 2885 Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP); 2886 SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end()); 2887 Type *ElTy = 2888 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs); 2889 Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP); 2890 2891 Assert(GEP.getType()->getScalarType()->isPointerTy() && 2892 GEP.getResultElementType() == ElTy, 2893 "GEP is not of right type for indices!", &GEP, ElTy); 2894 2895 if (GEP.getType()->isVectorTy()) { 2896 // Additional checks for vector GEPs. 
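// A rough sketch of the shape being checked (hypothetical IR):
//   %p = getelementptr i32, <4 x i32*> %ptrs, <4 x i64> %idxs
// yields a <4 x i32*>; any vector index must have the same element count as
// the result, and every index must be of integer type.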
2897 unsigned GEPWidth = GEP.getType()->getVectorNumElements(); 2898 if (GEP.getPointerOperandType()->isVectorTy()) 2899 Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(), 2900 "Vector GEP result width doesn't match operand's", &GEP); 2901 for (Value *Idx : Idxs) { 2902 Type *IndexTy = Idx->getType(); 2903 if (IndexTy->isVectorTy()) { 2904 unsigned IndexWidth = IndexTy->getVectorNumElements(); 2905 Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP); 2906 } 2907 Assert(IndexTy->getScalarType()->isIntegerTy(), 2908 "All GEP indices should be of integer type"); 2909 } 2910 } 2911 visitInstruction(GEP); 2912 } 2913 2914 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) { 2915 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper(); 2916 } 2917 2918 void Verifier::visitRangeMetadata(Instruction& I, 2919 MDNode* Range, Type* Ty) { 2920 assert(Range && 2921 Range == I.getMetadata(LLVMContext::MD_range) && 2922 "precondition violation"); 2923 2924 unsigned NumOperands = Range->getNumOperands(); 2925 Assert(NumOperands % 2 == 0, "Unfinished range!", Range); 2926 unsigned NumRanges = NumOperands / 2; 2927 Assert(NumRanges >= 1, "It should have at least one range!", Range); 2928 2929 ConstantRange LastRange(1); // Dummy initial value 2930 for (unsigned i = 0; i < NumRanges; ++i) { 2931 ConstantInt *Low = 2932 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i)); 2933 Assert(Low, "The lower limit must be an integer!", Low); 2934 ConstantInt *High = 2935 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1)); 2936 Assert(High, "The upper limit must be an integer!", High); 2937 Assert(High->getType() == Low->getType() && High->getType() == Ty, 2938 "Range types must match instruction type!", &I); 2939 2940 APInt HighV = High->getValue(); 2941 APInt LowV = Low->getValue(); 2942 ConstantRange CurRange(LowV, HighV); 2943 Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(), 2944 "Range must not be empty!", Range); 2945 if (i != 0) { 2946 Assert(CurRange.intersectWith(LastRange).isEmptySet(), 2947 "Intervals are overlapping", Range); 2948 Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order", 2949 Range); 2950 Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous", 2951 Range); 2952 } 2953 LastRange = ConstantRange(LowV, HighV); 2954 } 2955 if (NumRanges > 2) { 2956 APInt FirstLow = 2957 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue(); 2958 APInt FirstHigh = 2959 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue(); 2960 ConstantRange FirstRange(FirstLow, FirstHigh); 2961 Assert(FirstRange.intersectWith(LastRange).isEmptySet(), 2962 "Intervals are overlapping", Range); 2963 Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous", 2964 Range); 2965 } 2966 } 2967 2968 void Verifier::checkAtomicMemAccessSize(const Module *M, Type *Ty, 2969 const Instruction *I) { 2970 unsigned Size = M->getDataLayout().getTypeSizeInBits(Ty); 2971 Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I); 2972 Assert(!(Size & (Size - 1)), 2973 "atomic memory access' operand must have a power-of-two size", Ty, I); 2974 } 2975 2976 void Verifier::visitLoadInst(LoadInst &LI) { 2977 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType()); 2978 Assert(PTy, "Load operand must be a pointer.", &LI); 2979 Type *ElTy = LI.getType(); 2980 Assert(LI.getAlignment() <= Value::MaximumAlignment, 2981 "huge alignment values are unsupported", 
&LI); 2982 Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI); 2983 if (LI.isAtomic()) { 2984 Assert(LI.getOrdering() != AtomicOrdering::Release && 2985 LI.getOrdering() != AtomicOrdering::AcquireRelease, 2986 "Load cannot have Release ordering", &LI); 2987 Assert(LI.getAlignment() != 0, 2988 "Atomic load must specify explicit alignment", &LI); 2989 Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() || 2990 ElTy->isFloatingPointTy(), 2991 "atomic load operand must have integer, pointer, or floating point " 2992 "type!", 2993 ElTy, &LI); 2994 checkAtomicMemAccessSize(M, ElTy, &LI); 2995 } else { 2996 Assert(LI.getSynchScope() == CrossThread, 2997 "Non-atomic load cannot have SynchronizationScope specified", &LI); 2998 } 2999 3000 visitInstruction(LI); 3001 } 3002 3003 void Verifier::visitStoreInst(StoreInst &SI) { 3004 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType()); 3005 Assert(PTy, "Store operand must be a pointer.", &SI); 3006 Type *ElTy = PTy->getElementType(); 3007 Assert(ElTy == SI.getOperand(0)->getType(), 3008 "Stored value type does not match pointer operand type!", &SI, ElTy); 3009 Assert(SI.getAlignment() <= Value::MaximumAlignment, 3010 "huge alignment values are unsupported", &SI); 3011 Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI); 3012 if (SI.isAtomic()) { 3013 Assert(SI.getOrdering() != AtomicOrdering::Acquire && 3014 SI.getOrdering() != AtomicOrdering::AcquireRelease, 3015 "Store cannot have Acquire ordering", &SI); 3016 Assert(SI.getAlignment() != 0, 3017 "Atomic store must specify explicit alignment", &SI); 3018 Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() || 3019 ElTy->isFloatingPointTy(), 3020 "atomic store operand must have integer, pointer, or floating point " 3021 "type!", 3022 ElTy, &SI); 3023 checkAtomicMemAccessSize(M, ElTy, &SI); 3024 } else { 3025 Assert(SI.getSynchScope() == CrossThread, 3026 "Non-atomic store cannot have SynchronizationScope specified", &SI); 3027 } 3028 visitInstruction(SI); 3029 } 3030 3031 /// Check that SwiftErrorVal is used as a swifterror argument in CS. 3032 void Verifier::verifySwiftErrorCallSite(CallSite CS, 3033 const Value *SwiftErrorVal) { 3034 unsigned Idx = 0; 3035 for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); 3036 I != E; ++I, ++Idx) { 3037 if (*I == SwiftErrorVal) { 3038 Assert(CS.paramHasAttr(Idx+1, Attribute::SwiftError), 3039 "swifterror value when used in a callsite should be marked " 3040 "with swifterror attribute", 3041 SwiftErrorVal, CS); 3042 } 3043 } 3044 } 3045 3046 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) { 3047 // Check that swifterror value is only used by loads, stores, or as 3048 // a swifterror argument. 3049 for (const User *U : SwiftErrorVal->users()) { 3050 Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) || 3051 isa<InvokeInst>(U), 3052 "swifterror value can only be loaded and stored from, or " 3053 "as a swifterror argument!", 3054 SwiftErrorVal, U); 3055 // If it is used by a store, check it is the second operand. 
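// (Illustrative only, hypothetical IR: given "%err = alloca swifterror i8*",
// a store *to* the slot, "store i8* %v, i8** %err", is fine, whereas a store
// that uses %err as the value operand is rejected below.)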
3056 if (auto StoreI = dyn_cast<StoreInst>(U)) 3057 Assert(StoreI->getOperand(1) == SwiftErrorVal, 3058 "swifterror value should be the second operand when used " 3059 "by stores", SwiftErrorVal, U); 3060 if (auto CallI = dyn_cast<CallInst>(U)) 3061 verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal); 3062 if (auto II = dyn_cast<InvokeInst>(U)) 3063 verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal); 3064 } 3065 } 3066 3067 void Verifier::visitAllocaInst(AllocaInst &AI) { 3068 SmallPtrSet<Type*, 4> Visited; 3069 PointerType *PTy = AI.getType(); 3070 Assert(PTy->getAddressSpace() == 0, 3071 "Allocation instruction pointer not in the generic address space!", 3072 &AI); 3073 Assert(AI.getAllocatedType()->isSized(&Visited), 3074 "Cannot allocate unsized type", &AI); 3075 Assert(AI.getArraySize()->getType()->isIntegerTy(), 3076 "Alloca array size must have integer type", &AI); 3077 Assert(AI.getAlignment() <= Value::MaximumAlignment, 3078 "huge alignment values are unsupported", &AI); 3079 3080 if (AI.isSwiftError()) { 3081 verifySwiftErrorValue(&AI); 3082 } 3083 3084 visitInstruction(AI); 3085 } 3086 3087 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { 3088 3089 // FIXME: more conditions??? 3090 Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic, 3091 "cmpxchg instructions must be atomic.", &CXI); 3092 Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic, 3093 "cmpxchg instructions must be atomic.", &CXI); 3094 Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered, 3095 "cmpxchg instructions cannot be unordered.", &CXI); 3096 Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered, 3097 "cmpxchg instructions cannot be unordered.", &CXI); 3098 Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()), 3099 "cmpxchg instructions failure argument shall be no stronger than the " 3100 "success argument", 3101 &CXI); 3102 Assert(CXI.getFailureOrdering() != AtomicOrdering::Release && 3103 CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease, 3104 "cmpxchg failure ordering cannot include release semantics", &CXI); 3105 3106 PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType()); 3107 Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI); 3108 Type *ElTy = PTy->getElementType(); 3109 Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(), 3110 "cmpxchg operand must have integer or pointer type", 3111 ElTy, &CXI); 3112 checkAtomicMemAccessSize(M, ElTy, &CXI); 3113 Assert(ElTy == CXI.getOperand(1)->getType(), 3114 "Expected value type does not match pointer operand type!", &CXI, 3115 ElTy); 3116 Assert(ElTy == CXI.getOperand(2)->getType(), 3117 "Stored value type does not match pointer operand type!", &CXI, ElTy); 3118 visitInstruction(CXI); 3119 } 3120 3121 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { 3122 Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic, 3123 "atomicrmw instructions must be atomic.", &RMWI); 3124 Assert(RMWI.getOrdering() != AtomicOrdering::Unordered, 3125 "atomicrmw instructions cannot be unordered.", &RMWI); 3126 PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); 3127 Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI); 3128 Type *ElTy = PTy->getElementType(); 3129 Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!", 3130 &RMWI, ElTy); 3131 checkAtomicMemAccessSize(M, ElTy, &RMWI); 3132 Assert(ElTy == RMWI.getOperand(1)->getType(), 3133 "Argument value type does not match pointer operand 
type!", &RMWI, 3134 ElTy); 3135 Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() && 3136 RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP, 3137 "Invalid binary operation!", &RMWI); 3138 visitInstruction(RMWI); 3139 } 3140 3141 void Verifier::visitFenceInst(FenceInst &FI) { 3142 const AtomicOrdering Ordering = FI.getOrdering(); 3143 Assert(Ordering == AtomicOrdering::Acquire || 3144 Ordering == AtomicOrdering::Release || 3145 Ordering == AtomicOrdering::AcquireRelease || 3146 Ordering == AtomicOrdering::SequentiallyConsistent, 3147 "fence instructions may only have acquire, release, acq_rel, or " 3148 "seq_cst ordering.", 3149 &FI); 3150 visitInstruction(FI); 3151 } 3152 3153 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) { 3154 Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(), 3155 EVI.getIndices()) == EVI.getType(), 3156 "Invalid ExtractValueInst operands!", &EVI); 3157 3158 visitInstruction(EVI); 3159 } 3160 3161 void Verifier::visitInsertValueInst(InsertValueInst &IVI) { 3162 Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(), 3163 IVI.getIndices()) == 3164 IVI.getOperand(1)->getType(), 3165 "Invalid InsertValueInst operands!", &IVI); 3166 3167 visitInstruction(IVI); 3168 } 3169 3170 static Value *getParentPad(Value *EHPad) { 3171 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad)) 3172 return FPI->getParentPad(); 3173 3174 return cast<CatchSwitchInst>(EHPad)->getParentPad(); 3175 } 3176 3177 void Verifier::visitEHPadPredecessors(Instruction &I) { 3178 assert(I.isEHPad()); 3179 3180 BasicBlock *BB = I.getParent(); 3181 Function *F = BB->getParent(); 3182 3183 Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I); 3184 3185 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) { 3186 // The landingpad instruction defines its parent as a landing pad block. The 3187 // landing pad block may be branched to only by the unwind edge of an 3188 // invoke. 3189 for (BasicBlock *PredBB : predecessors(BB)) { 3190 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator()); 3191 Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB, 3192 "Block containing LandingPadInst must be jumped to " 3193 "only by the unwind edge of an invoke.", 3194 LPI); 3195 } 3196 return; 3197 } 3198 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) { 3199 if (!pred_empty(BB)) 3200 Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(), 3201 "Block containing CatchPadInst must be jumped to " 3202 "only by its catchswitch.", 3203 CPI); 3204 Assert(BB != CPI->getCatchSwitch()->getUnwindDest(), 3205 "Catchswitch cannot unwind to one of its catchpads", 3206 CPI->getCatchSwitch(), CPI); 3207 return; 3208 } 3209 3210 // Verify that each pred has a legal terminator with a legal to/from EH 3211 // pad relationship.
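// For example (illustrative IR, names invented for this comment): a
// cleanupret exits the cleanuppad it names, so it may unwind to a sibling
// pad or to the caller, but never to the pad itself or to a pad nested
// inside it:
//   %pad = cleanuppad within none []
//   ...
//   cleanupret from %pad unwind label %sibling.pad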
3212 Instruction *ToPad = &I; 3213 Value *ToPadParent = getParentPad(ToPad); 3214 for (BasicBlock *PredBB : predecessors(BB)) { 3215 TerminatorInst *TI = PredBB->getTerminator(); 3216 Value *FromPad; 3217 if (auto *II = dyn_cast<InvokeInst>(TI)) { 3218 Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB, 3219 "EH pad must be jumped to via an unwind edge", ToPad, II); 3220 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet)) 3221 FromPad = Bundle->Inputs[0]; 3222 else 3223 FromPad = ConstantTokenNone::get(II->getContext()); 3224 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) { 3225 FromPad = CRI->getOperand(0); 3226 Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI); 3227 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) { 3228 FromPad = CSI; 3229 } else { 3230 Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI); 3231 } 3232 3233 // The edge may exit from zero or more nested pads. 3234 SmallSet<Value *, 8> Seen; 3235 for (;; FromPad = getParentPad(FromPad)) { 3236 Assert(FromPad != ToPad, 3237 "EH pad cannot handle exceptions raised within it", FromPad, TI); 3238 if (FromPad == ToPadParent) { 3239 // This is a legal unwind edge. 3240 break; 3241 } 3242 Assert(!isa<ConstantTokenNone>(FromPad), 3243 "A single unwind edge may only enter one EH pad", TI); 3244 Assert(Seen.insert(FromPad).second, 3245 "EH pad jumps through a cycle of pads", FromPad); 3246 } 3247 } 3248 } 3249 3250 void Verifier::visitLandingPadInst(LandingPadInst &LPI) { 3251 // The landingpad instruction is ill-formed if it doesn't have any clauses and 3252 // isn't a cleanup. 3253 Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(), 3254 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI); 3255 3256 visitEHPadPredecessors(LPI); 3257 3258 if (!LandingPadResultTy) 3259 LandingPadResultTy = LPI.getType(); 3260 else 3261 Assert(LandingPadResultTy == LPI.getType(), 3262 "The landingpad instruction should have a consistent result type " 3263 "inside a function.", 3264 &LPI); 3265 3266 Function *F = LPI.getParent()->getParent(); 3267 Assert(F->hasPersonalityFn(), 3268 "LandingPadInst needs to be in a function with a personality.", &LPI); 3269 3270 // The landingpad instruction must be the first non-PHI instruction in the 3271 // block. 3272 Assert(LPI.getParent()->getLandingPadInst() == &LPI, 3273 "LandingPadInst not the first non-PHI instruction in the block.", 3274 &LPI); 3275 3276 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) { 3277 Constant *Clause = LPI.getClause(i); 3278 if (LPI.isCatch(i)) { 3279 Assert(isa<PointerType>(Clause->getType()), 3280 "Catch operand does not have pointer type!", &LPI); 3281 } else { 3282 Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI); 3283 Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause), 3284 "Filter operand is not an array of constants!", &LPI); 3285 } 3286 } 3287 3288 visitInstruction(LPI); 3289 } 3290 3291 void Verifier::visitCatchPadInst(CatchPadInst &CPI) { 3292 BasicBlock *BB = CPI.getParent(); 3293 3294 Function *F = BB->getParent(); 3295 Assert(F->hasPersonalityFn(), 3296 "CatchPadInst needs to be in a function with a personality.", &CPI); 3297 3298 Assert(isa<CatchSwitchInst>(CPI.getParentPad()), 3299 "CatchPadInst needs to be directly nested in a CatchSwitchInst.", 3300 CPI.getParentPad()); 3301 3302 // The catchpad instruction must be the first non-PHI instruction in the 3303 // block. 
3304 Assert(BB->getFirstNonPHI() == &CPI, 3305 "CatchPadInst not the first non-PHI instruction in the block.", &CPI); 3306 3307 visitEHPadPredecessors(CPI); 3308 visitFuncletPadInst(CPI); 3309 } 3310 3311 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) { 3312 Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)), 3313 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn, 3314 CatchReturn.getOperand(0)); 3315 3316 visitTerminatorInst(CatchReturn); 3317 } 3318 3319 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) { 3320 BasicBlock *BB = CPI.getParent(); 3321 3322 Function *F = BB->getParent(); 3323 Assert(F->hasPersonalityFn(), 3324 "CleanupPadInst needs to be in a function with a personality.", &CPI); 3325 3326 // The cleanuppad instruction must be the first non-PHI instruction in the 3327 // block. 3328 Assert(BB->getFirstNonPHI() == &CPI, 3329 "CleanupPadInst not the first non-PHI instruction in the block.", 3330 &CPI); 3331 3332 auto *ParentPad = CPI.getParentPad(); 3333 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad), 3334 "CleanupPadInst has an invalid parent.", &CPI); 3335 3336 visitEHPadPredecessors(CPI); 3337 visitFuncletPadInst(CPI); 3338 } 3339 3340 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) { 3341 User *FirstUser = nullptr; 3342 Value *FirstUnwindPad = nullptr; 3343 SmallVector<FuncletPadInst *, 8> Worklist({&FPI}); 3344 SmallSet<FuncletPadInst *, 8> Seen; 3345 3346 while (!Worklist.empty()) { 3347 FuncletPadInst *CurrentPad = Worklist.pop_back_val(); 3348 Assert(Seen.insert(CurrentPad).second, 3349 "FuncletPadInst must not be nested within itself", CurrentPad); 3350 Value *UnresolvedAncestorPad = nullptr; 3351 for (User *U : CurrentPad->users()) { 3352 BasicBlock *UnwindDest; 3353 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) { 3354 UnwindDest = CRI->getUnwindDest(); 3355 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) { 3356 // We allow catchswitch unwind to caller to nest 3357 // within an outer pad that unwinds somewhere else, 3358 // because catchswitch doesn't have a nounwind variant. 3359 // See e.g. SimplifyCFGOpt::SimplifyUnreachable. 3360 if (CSI->unwindsToCaller()) 3361 continue; 3362 UnwindDest = CSI->getUnwindDest(); 3363 } else if (auto *II = dyn_cast<InvokeInst>(U)) { 3364 UnwindDest = II->getUnwindDest(); 3365 } else if (isa<CallInst>(U)) { 3366 // Calls which don't unwind may be found inside funclet 3367 // pads that unwind somewhere else. We don't *require* 3368 // such calls to be annotated nounwind. 3369 continue; 3370 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) { 3371 // The unwind dest for a cleanup can only be found by 3372 // recursive search. Add it to the worklist, and we'll 3373 // search for its first use that determines where it unwinds. 3374 Worklist.push_back(CPI); 3375 continue; 3376 } else { 3377 Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U); 3378 continue; 3379 } 3380 3381 Value *UnwindPad; 3382 bool ExitsFPI; 3383 if (UnwindDest) { 3384 UnwindPad = UnwindDest->getFirstNonPHI(); 3385 if (!cast<Instruction>(UnwindPad)->isEHPad()) 3386 continue; 3387 Value *UnwindParent = getParentPad(UnwindPad); 3388 // Ignore unwind edges that don't exit CurrentPad. 3389 if (UnwindParent == CurrentPad) 3390 continue; 3391 // Determine whether the original funclet pad is exited, 3392 // and if we are scanning nested pads determine how many 3393 // of them are exited so we can stop searching their 3394 // children. 
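// We do that by walking the parent-pad chain upward from CurrentPad: every
// pad passed before reaching FPI itself, or the unwind destination's parent
// pad, is exited by this unwind edge.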
3395 Value *ExitedPad = CurrentPad; 3396 ExitsFPI = false; 3397 do { 3398 if (ExitedPad == &FPI) { 3399 ExitsFPI = true; 3400 // Now we can resolve any ancestors of CurrentPad up to 3401 // FPI, but not including FPI since we need to make sure 3402 // to check all direct users of FPI for consistency. 3403 UnresolvedAncestorPad = &FPI; 3404 break; 3405 } 3406 Value *ExitedParent = getParentPad(ExitedPad); 3407 if (ExitedParent == UnwindParent) { 3408 // ExitedPad is the ancestor-most pad which this unwind 3409 // edge exits, so we can resolve up to it, meaning that 3410 // ExitedParent is the first ancestor still unresolved. 3411 UnresolvedAncestorPad = ExitedParent; 3412 break; 3413 } 3414 ExitedPad = ExitedParent; 3415 } while (!isa<ConstantTokenNone>(ExitedPad)); 3416 } else { 3417 // Unwinding to caller exits all pads. 3418 UnwindPad = ConstantTokenNone::get(FPI.getContext()); 3419 ExitsFPI = true; 3420 UnresolvedAncestorPad = &FPI; 3421 } 3422 3423 if (ExitsFPI) { 3424 // This unwind edge exits FPI. Make sure it agrees with other 3425 // such edges. 3426 if (FirstUser) { 3427 Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet " 3428 "pad must have the same unwind " 3429 "dest", 3430 &FPI, U, FirstUser); 3431 } else { 3432 FirstUser = U; 3433 FirstUnwindPad = UnwindPad; 3434 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds 3435 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) && 3436 getParentPad(UnwindPad) == getParentPad(&FPI)) 3437 SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U); 3438 } 3439 } 3440 // Make sure we visit all uses of FPI, but for nested pads stop as 3441 // soon as we know where they unwind to. 3442 if (CurrentPad != &FPI) 3443 break; 3444 } 3445 if (UnresolvedAncestorPad) { 3446 if (CurrentPad == UnresolvedAncestorPad) { 3447 // When CurrentPad is FPI itself, we don't mark it as resolved even if 3448 // we've found an unwind edge that exits it, because we need to verify 3449 // all direct uses of FPI. 3450 assert(CurrentPad == &FPI); 3451 continue; 3452 } 3453 // Pop off the worklist any nested pads that we've found an unwind 3454 // destination for. The pads on the worklist are the uncles, 3455 // great-uncles, etc. of CurrentPad. We've found an unwind destination 3456 // for all ancestors of CurrentPad up to but not including 3457 // UnresolvedAncestorPad. 3458 Value *ResolvedPad = CurrentPad; 3459 while (!Worklist.empty()) { 3460 Value *UnclePad = Worklist.back(); 3461 Value *AncestorPad = getParentPad(UnclePad); 3462 // Walk ResolvedPad up the ancestor list until we either find the 3463 // uncle's parent or the last resolved ancestor. 3464 while (ResolvedPad != AncestorPad) { 3465 Value *ResolvedParent = getParentPad(ResolvedPad); 3466 if (ResolvedParent == UnresolvedAncestorPad) { 3467 break; 3468 } 3469 ResolvedPad = ResolvedParent; 3470 } 3471 // If the resolved ancestor search didn't find the uncle's parent, 3472 // then the uncle is not yet resolved. 3473 if (ResolvedPad != AncestorPad) 3474 break; 3475 // This uncle is resolved, so pop it from the worklist. 
3476 Worklist.pop_back(); 3477 } 3478 } 3479 } 3480 3481 if (FirstUnwindPad) { 3482 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) { 3483 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest(); 3484 Value *SwitchUnwindPad; 3485 if (SwitchUnwindDest) 3486 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI(); 3487 else 3488 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext()); 3489 Assert(SwitchUnwindPad == FirstUnwindPad, 3490 "Unwind edges out of a catch must have the same unwind dest as " 3491 "the parent catchswitch", 3492 &FPI, FirstUser, CatchSwitch); 3493 } 3494 } 3495 3496 visitInstruction(FPI); 3497 } 3498 3499 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) { 3500 BasicBlock *BB = CatchSwitch.getParent(); 3501 3502 Function *F = BB->getParent(); 3503 Assert(F->hasPersonalityFn(), 3504 "CatchSwitchInst needs to be in a function with a personality.", 3505 &CatchSwitch); 3506 3507 // The catchswitch instruction must be the first non-PHI instruction in the 3508 // block. 3509 Assert(BB->getFirstNonPHI() == &CatchSwitch, 3510 "CatchSwitchInst not the first non-PHI instruction in the block.", 3511 &CatchSwitch); 3512 3513 auto *ParentPad = CatchSwitch.getParentPad(); 3514 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad), 3515 "CatchSwitchInst has an invalid parent.", ParentPad); 3516 3517 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) { 3518 Instruction *I = UnwindDest->getFirstNonPHI(); 3519 Assert(I->isEHPad() && !isa<LandingPadInst>(I), 3520 "CatchSwitchInst must unwind to an EH block which is not a " 3521 "landingpad.", 3522 &CatchSwitch); 3523 3524 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds 3525 if (getParentPad(I) == ParentPad) 3526 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch; 3527 } 3528 3529 Assert(CatchSwitch.getNumHandlers() != 0, 3530 "CatchSwitchInst cannot have empty handler list", &CatchSwitch); 3531 3532 for (BasicBlock *Handler : CatchSwitch.handlers()) { 3533 Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()), 3534 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler); 3535 } 3536 3537 visitEHPadPredecessors(CatchSwitch); 3538 visitTerminatorInst(CatchSwitch); 3539 } 3540 3541 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) { 3542 Assert(isa<CleanupPadInst>(CRI.getOperand(0)), 3543 "CleanupReturnInst needs to be provided a CleanupPad", &CRI, 3544 CRI.getOperand(0)); 3545 3546 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) { 3547 Instruction *I = UnwindDest->getFirstNonPHI(); 3548 Assert(I->isEHPad() && !isa<LandingPadInst>(I), 3549 "CleanupReturnInst must unwind to an EH block which is not a " 3550 "landingpad.", 3551 &CRI); 3552 } 3553 3554 visitTerminatorInst(CRI); 3555 } 3556 3557 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { 3558 Instruction *Op = cast<Instruction>(I.getOperand(i)); 3559 // If we have an invalid invoke, don't try to compute the dominance. 3560 // We already reject it in the invoke specific checks and the dominance 3561 // computation doesn't handle multiple edges. 3562 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) { 3563 if (II->getNormalDest() == II->getUnwindDest()) 3564 return; 3565 } 3566 3567 // Quick check whether the def has already been encountered in the same block. 3568 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI 3569 // uses are defined to happen on the incoming edge, not at the instruction.
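// A def that was already visited earlier in this block therefore trivially
// dominates a later non-PHI use in the same block, so the DominatorTree
// query can be skipped for it.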
3570 // 3571 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata) 3572 // wrapping an SSA value, assert that we've already encountered it. See 3573 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp. 3574 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op)) 3575 return; 3576 3577 const Use &U = I.getOperandUse(i); 3578 Assert(DT.dominates(Op, U), 3579 "Instruction does not dominate all uses!", Op, &I); 3580 } 3581 3582 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) { 3583 Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null " 3584 "apply only to pointer types", &I); 3585 Assert(isa<LoadInst>(I), 3586 "dereferenceable, dereferenceable_or_null apply only to load" 3587 " instructions, use attributes for calls or invokes", &I); 3588 Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null " 3589 "take one operand!", &I); 3590 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0)); 3591 Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, " 3592 "dereferenceable_or_null metadata value must be an i64!", &I); 3593 } 3594 3595 /// visitInstruction - Verify that an instruction is well formed. 3596 /// 3597 void Verifier::visitInstruction(Instruction &I) { 3598 BasicBlock *BB = I.getParent(); 3599 Assert(BB, "Instruction not embedded in basic block!", &I); 3600 3601 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential 3602 for (User *U : I.users()) { 3603 Assert(U != (User *)&I || !DT.isReachableFromEntry(BB), 3604 "Only PHI nodes may reference their own value!", &I); 3605 } 3606 } 3607 3608 // Check that void typed values don't have names 3609 Assert(!I.getType()->isVoidTy() || !I.hasName(), 3610 "Instruction has a name, but provides a void value!", &I); 3611 3612 // Check that the return value of the instruction is either void or a legal 3613 // value type. 3614 Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(), 3615 "Instruction returns a non-scalar type!", &I); 3616 3617 // Check that the instruction doesn't produce metadata. Calls are already 3618 // checked against the callee type. 3619 Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I), 3620 "Invalid use of metadata!", &I); 3621 3622 // Check that all uses of the instruction, if they are instructions 3623 // themselves, actually have parent basic blocks. If the use is not an 3624 // instruction, it is an error! 3625 for (Use &U : I.uses()) { 3626 if (Instruction *Used = dyn_cast<Instruction>(U.getUser())) 3627 Assert(Used->getParent() != nullptr, 3628 "Instruction referencing" 3629 " instruction not embedded in a basic block!", 3630 &I, Used); 3631 else { 3632 CheckFailed("Use of instruction is not an instruction!", U); 3633 return; 3634 } 3635 } 3636 3637 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { 3638 Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I); 3639 3640 // Check to make sure that only first-class-values are operands to 3641 // instructions. 3642 if (!I.getOperand(i)->getType()->isFirstClassType()) { 3643 Assert(0, "Instruction operands must be first-class values!", &I); 3644 } 3645 3646 if (Function *F = dyn_cast<Function>(I.getOperand(i))) { 3647 // Check to make sure that the "address of" an intrinsic function is never 3648 // taken. 3649 Assert( 3650 !F->isIntrinsic() || 3651 i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ?
e - 3 : 0), 3652 "Cannot take the address of an intrinsic!", &I); 3653 Assert( 3654 !F->isIntrinsic() || isa<CallInst>(I) || 3655 F->getIntrinsicID() == Intrinsic::donothing || 3656 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void || 3657 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 || 3658 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint, 3659 "Cannot invoke an intrinsic other than donothing, patchpoint or " 3660 "statepoint", 3661 &I); 3662 Assert(F->getParent() == M, "Referencing function in another module!", 3663 &I, M, F, F->getParent()); 3664 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) { 3665 Assert(OpBB->getParent() == BB->getParent(), 3666 "Referring to a basic block in another function!", &I); 3667 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) { 3668 Assert(OpArg->getParent() == BB->getParent(), 3669 "Referring to an argument in another function!", &I); 3670 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) { 3671 Assert(GV->getParent() == M, "Referencing global in another module!", &I, M, GV, GV->getParent()); 3672 } else if (isa<Instruction>(I.getOperand(i))) { 3673 verifyDominatesUse(I, i); 3674 } else if (isa<InlineAsm>(I.getOperand(i))) { 3675 Assert((i + 1 == e && isa<CallInst>(I)) || 3676 (i + 3 == e && isa<InvokeInst>(I)), 3677 "Cannot take the address of an inline asm!", &I); 3678 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) { 3679 if (CE->getType()->isPtrOrPtrVectorTy()) { 3680 // If we have a ConstantExpr pointer, we need to see if it came from an 3681 // illegal bitcast (inttoptr <constant int> ) 3682 visitConstantExprsRecursively(CE); 3683 } 3684 } 3685 } 3686 3687 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) { 3688 Assert(I.getType()->isFPOrFPVectorTy(), 3689 "fpmath requires a floating point result!", &I); 3690 Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I); 3691 if (ConstantFP *CFP0 = 3692 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) { 3693 const APFloat &Accuracy = CFP0->getValueAPF(); 3694 Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle, 3695 "fpmath accuracy must have float type", &I); 3696 Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(), 3697 "fpmath accuracy not a positive number!", &I); 3698 } else { 3699 Assert(false, "invalid fpmath accuracy!", &I); 3700 } 3701 } 3702 3703 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) { 3704 Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I), 3705 "Ranges are only for loads, calls and invokes!", &I); 3706 visitRangeMetadata(I, Range, I.getType()); 3707 } 3708 3709 if (I.getMetadata(LLVMContext::MD_nonnull)) { 3710 Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types", 3711 &I); 3712 Assert(isa<LoadInst>(I), 3713 "nonnull applies only to load instructions, use attributes" 3714 " for calls or invokes", 3715 &I); 3716 } 3717 3718 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable)) 3719 visitDereferenceableMetadata(I, MD); 3720 3721 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null)) 3722 visitDereferenceableMetadata(I, MD); 3723 3724 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) { 3725 Assert(I.getType()->isPointerTy(), "align applies only to pointer types", 3726 &I); 3727 Assert(isa<LoadInst>(I), "align applies only to load instructions, " 3728 "use attributes for calls or invokes", &I); 3729 Assert(AlignMD->getNumOperands() == 1, 
"align takes one operand!", &I); 3730 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0)); 3731 Assert(CI && CI->getType()->isIntegerTy(64), 3732 "align metadata value must be an i64!", &I); 3733 uint64_t Align = CI->getZExtValue(); 3734 Assert(isPowerOf2_64(Align), 3735 "align metadata value must be a power of 2!", &I); 3736 Assert(Align <= Value::MaximumAlignment, 3737 "alignment is larger that implementation defined limit", &I); 3738 } 3739 3740 if (MDNode *N = I.getDebugLoc().getAsMDNode()) { 3741 AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N); 3742 visitMDNode(*N); 3743 } 3744 3745 if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I)) 3746 verifyBitPieceExpression(*DII); 3747 3748 InstsInThisBlock.insert(&I); 3749 } 3750 3751 /// Allow intrinsics to be verified in different ways. 3752 void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) { 3753 Function *IF = CS.getCalledFunction(); 3754 Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!", 3755 IF); 3756 3757 // Verify that the intrinsic prototype lines up with what the .td files 3758 // describe. 3759 FunctionType *IFTy = IF->getFunctionType(); 3760 bool IsVarArg = IFTy->isVarArg(); 3761 3762 SmallVector<Intrinsic::IITDescriptor, 8> Table; 3763 getIntrinsicInfoTableEntries(ID, Table); 3764 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table; 3765 3766 SmallVector<Type *, 4> ArgTys; 3767 Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(), 3768 TableRef, ArgTys), 3769 "Intrinsic has incorrect return type!", IF); 3770 for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i) 3771 Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i), 3772 TableRef, ArgTys), 3773 "Intrinsic has incorrect argument type!", IF); 3774 3775 // Verify if the intrinsic call matches the vararg property. 3776 if (IsVarArg) 3777 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef), 3778 "Intrinsic was not defined with variable arguments!", IF); 3779 else 3780 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef), 3781 "Callsite was not defined with variable arguments!", IF); 3782 3783 // All descriptors should be absorbed by now. 3784 Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF); 3785 3786 // Now that we have the intrinsic ID and the actual argument types (and we 3787 // know they are legal for the intrinsic!) get the intrinsic name through the 3788 // usual means. This allows us to verify the mangling of argument types into 3789 // the name. 3790 const std::string ExpectedName = Intrinsic::getName(ID, ArgTys); 3791 Assert(ExpectedName == IF->getName(), 3792 "Intrinsic name not mangled correctly for type arguments! " 3793 "Should be: " + 3794 ExpectedName, 3795 IF); 3796 3797 // If the intrinsic takes MDNode arguments, verify that they are either global 3798 // or are local to *this* function. 
3799 for (Value *V : CS.args()) 3800 if (auto *MD = dyn_cast<MetadataAsValue>(V)) 3801 visitMetadataAsValue(*MD, CS.getCaller()); 3802 3803 switch (ID) { 3804 default: 3805 break; 3806 case Intrinsic::ctlz: // llvm.ctlz 3807 case Intrinsic::cttz: // llvm.cttz 3808 Assert(isa<ConstantInt>(CS.getArgOperand(1)), 3809 "is_zero_undef argument of bit counting intrinsics must be a " 3810 "constant int", 3811 CS); 3812 break; 3813 case Intrinsic::dbg_declare: // llvm.dbg.declare 3814 Assert(isa<MetadataAsValue>(CS.getArgOperand(0)), 3815 "invalid llvm.dbg.declare intrinsic call 1", CS); 3816 visitDbgIntrinsic("declare", cast<DbgDeclareInst>(*CS.getInstruction())); 3817 break; 3818 case Intrinsic::dbg_value: // llvm.dbg.value 3819 visitDbgIntrinsic("value", cast<DbgValueInst>(*CS.getInstruction())); 3820 break; 3821 case Intrinsic::memcpy: 3822 case Intrinsic::memmove: 3823 case Intrinsic::memset: { 3824 ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3)); 3825 Assert(AlignCI, 3826 "alignment argument of memory intrinsics must be a constant int", 3827 CS); 3828 const APInt &AlignVal = AlignCI->getValue(); 3829 Assert(AlignCI->isZero() || AlignVal.isPowerOf2(), 3830 "alignment argument of memory intrinsics must be a power of 2", CS); 3831 Assert(isa<ConstantInt>(CS.getArgOperand(4)), 3832 "isvolatile argument of memory intrinsics must be a constant int", 3833 CS); 3834 break; 3835 } 3836 case Intrinsic::gcroot: 3837 case Intrinsic::gcwrite: 3838 case Intrinsic::gcread: 3839 if (ID == Intrinsic::gcroot) { 3840 AllocaInst *AI = 3841 dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts()); 3842 Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS); 3843 Assert(isa<Constant>(CS.getArgOperand(1)), 3844 "llvm.gcroot parameter #2 must be a constant.", CS); 3845 if (!AI->getAllocatedType()->isPointerTy()) { 3846 Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)), 3847 "llvm.gcroot parameter #1 must either be a pointer alloca, " 3848 "or argument #2 must be a non-null constant.", 3849 CS); 3850 } 3851 } 3852 3853 Assert(CS.getParent()->getParent()->hasGC(), 3854 "Enclosing function does not use GC.", CS); 3855 break; 3856 case Intrinsic::init_trampoline: 3857 Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()), 3858 "llvm.init_trampoline parameter #2 must resolve to a function.", 3859 CS); 3860 break; 3861 case Intrinsic::prefetch: 3862 Assert(isa<ConstantInt>(CS.getArgOperand(1)) && 3863 isa<ConstantInt>(CS.getArgOperand(2)) && 3864 cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 && 3865 cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4, 3866 "invalid arguments to llvm.prefetch", CS); 3867 break; 3868 case Intrinsic::stackprotector: 3869 Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()), 3870 "llvm.stackprotector parameter #2 must resolve to an alloca.", CS); 3871 break; 3872 case Intrinsic::lifetime_start: 3873 case Intrinsic::lifetime_end: 3874 case Intrinsic::invariant_start: 3875 Assert(isa<ConstantInt>(CS.getArgOperand(0)), 3876 "size argument of memory use markers must be a constant integer", 3877 CS); 3878 break; 3879 case Intrinsic::invariant_end: 3880 Assert(isa<ConstantInt>(CS.getArgOperand(1)), 3881 "llvm.invariant.end parameter #2 must be a constant integer", CS); 3882 break; 3883 3884 case Intrinsic::localescape: { 3885 BasicBlock *BB = CS.getParent(); 3886 Assert(BB == &BB->getParent()->front(), 3887 "llvm.localescape used outside of entry block", CS); 3888 Assert(!SawFrameEscape, 3889 "multiple calls to 
llvm.localescape in one function", CS); 3890 for (Value *Arg : CS.args()) { 3891 if (isa<ConstantPointerNull>(Arg)) 3892 continue; // Null values are allowed as placeholders. 3893 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); 3894 Assert(AI && AI->isStaticAlloca(), 3895 "llvm.localescape only accepts static allocas", CS); 3896 } 3897 FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands(); 3898 SawFrameEscape = true; 3899 break; 3900 } 3901 case Intrinsic::localrecover: { 3902 Value *FnArg = CS.getArgOperand(0)->stripPointerCasts(); 3903 Function *Fn = dyn_cast<Function>(FnArg); 3904 Assert(Fn && !Fn->isDeclaration(), 3905 "llvm.localrecover first " 3906 "argument must be function defined in this module", 3907 CS); 3908 auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2)); 3909 Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int", 3910 CS); 3911 auto &Entry = FrameEscapeInfo[Fn]; 3912 Entry.second = unsigned( 3913 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1)); 3914 break; 3915 } 3916 3917 case Intrinsic::experimental_gc_statepoint: 3918 Assert(!CS.isInlineAsm(), 3919 "gc.statepoint support for inline assembly unimplemented", CS); 3920 Assert(CS.getParent()->getParent()->hasGC(), 3921 "Enclosing function does not use GC.", CS); 3922 3923 verifyStatepoint(CS); 3924 break; 3925 case Intrinsic::experimental_gc_result: { 3926 Assert(CS.getParent()->getParent()->hasGC(), 3927 "Enclosing function does not use GC.", CS); 3928 // Are we tied to a statepoint properly? 3929 CallSite StatepointCS(CS.getArgOperand(0)); 3930 const Function *StatepointFn = 3931 StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr; 3932 Assert(StatepointFn && StatepointFn->isDeclaration() && 3933 StatepointFn->getIntrinsicID() == 3934 Intrinsic::experimental_gc_statepoint, 3935 "gc.result operand #1 must be from a statepoint", CS, 3936 CS.getArgOperand(0)); 3937 3938 // Assert that result type matches wrapped callee. 3939 const Value *Target = StatepointCS.getArgument(2); 3940 auto *PT = cast<PointerType>(Target->getType()); 3941 auto *TargetFuncType = cast<FunctionType>(PT->getElementType()); 3942 Assert(CS.getType() == TargetFuncType->getReturnType(), 3943 "gc.result result type does not match wrapped callee", CS); 3944 break; 3945 } 3946 case Intrinsic::experimental_gc_relocate: { 3947 Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS); 3948 3949 Assert(isa<PointerType>(CS.getType()->getScalarType()), 3950 "gc.relocate must return a pointer or a vector of pointers", CS); 3951 3952 // Check that this relocate is correctly tied to the statepoint 3953 3954 // This is case for relocate on the unwinding path of an invoke statepoint 3955 if (LandingPadInst *LandingPad = 3956 dyn_cast<LandingPadInst>(CS.getArgOperand(0))) { 3957 3958 const BasicBlock *InvokeBB = 3959 LandingPad->getParent()->getUniquePredecessor(); 3960 3961 // Landingpad relocates should have only one predecessor with invoke 3962 // statepoint terminator 3963 Assert(InvokeBB, "safepoints should have unique landingpads", 3964 LandingPad->getParent()); 3965 Assert(InvokeBB->getTerminator(), "safepoint block should be well formed", 3966 InvokeBB); 3967 Assert(isStatepoint(InvokeBB->getTerminator()), 3968 "gc relocate should be linked to a statepoint", InvokeBB); 3969 } 3970 else { 3971 // In all other cases relocate should be tied to the statepoint directly. 
3972 // This covers relocates on a normal return path of invoke statepoint and 3973 // relocates of a call statepoint. 3974 auto Token = CS.getArgOperand(0); 3975 Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)), 3976 "gc relocate is incorrectly tied to the statepoint", CS, Token); 3977 } 3978 3979 // Verify rest of the relocate arguments. 3980 3981 ImmutableCallSite StatepointCS( 3982 cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint()); 3983 3984 // Both the base and derived must be piped through the safepoint. 3985 Value* Base = CS.getArgOperand(1); 3986 Assert(isa<ConstantInt>(Base), 3987 "gc.relocate operand #2 must be integer offset", CS); 3988 3989 Value* Derived = CS.getArgOperand(2); 3990 Assert(isa<ConstantInt>(Derived), 3991 "gc.relocate operand #3 must be integer offset", CS); 3992 3993 const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue(); 3994 const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue(); 3995 // Check the bounds 3996 Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(), 3997 "gc.relocate: statepoint base index out of bounds", CS); 3998 Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(), 3999 "gc.relocate: statepoint derived index out of bounds", CS); 4000 4001 // Check that BaseIndex and DerivedIndex fall within the 'gc parameters' 4002 // section of the statepoint's arguments. 4003 Assert(StatepointCS.arg_size() > 0, 4004 "gc.statepoint: insufficient arguments"); 4005 Assert(isa<ConstantInt>(StatepointCS.getArgument(3)), 4006 "gc.statepoint: number of call arguments must be constant integer"); 4007 const unsigned NumCallArgs = 4008 cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue(); 4009 Assert(StatepointCS.arg_size() > NumCallArgs + 5, 4010 "gc.statepoint: mismatch in number of call arguments"); 4011 Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)), 4012 "gc.statepoint: number of transition arguments must be " 4013 "a constant integer"); 4014 const int NumTransitionArgs = 4015 cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)) 4016 ->getZExtValue(); 4017 const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1; 4018 Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)), 4019 "gc.statepoint: number of deoptimization arguments must be " 4020 "a constant integer"); 4021 const int NumDeoptArgs = 4022 cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)) 4023 ->getZExtValue(); 4024 const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs; 4025 const int GCParamArgsEnd = StatepointCS.arg_size(); 4026 Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd, 4027 "gc.relocate: statepoint base index doesn't fall within the " 4028 "'gc parameters' section of the statepoint call", 4029 CS); 4030 Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd, 4031 "gc.relocate: statepoint derived index doesn't fall within the " 4032 "'gc parameters' section of the statepoint call", 4033 CS); 4034 4035 // Relocated value must be either a pointer type or vector-of-pointer type, 4036 // but gc_relocate does not need to return the same pointer type as the 4037 // relocated pointer. It can be cast to the correct type later if it's 4038 // desired.
However, they must have the same address space and 'vectorness' 4039 GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction()); 4040 Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(), 4041 "gc.relocate: relocated value must be a gc pointer", CS); 4042 4043 auto ResultType = CS.getType(); 4044 auto DerivedType = Relocate.getDerivedPtr()->getType(); 4045 Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(), 4046 "gc.relocate: vector relocates to vector and pointer to pointer", 4047 CS); 4048 Assert( 4049 ResultType->getPointerAddressSpace() == 4050 DerivedType->getPointerAddressSpace(), 4051 "gc.relocate: relocating a pointer shouldn't change its address space", 4052 CS); 4053 break; 4054 } 4055 case Intrinsic::eh_exceptioncode: 4056 case Intrinsic::eh_exceptionpointer: { 4057 Assert(isa<CatchPadInst>(CS.getArgOperand(0)), 4058 "eh.exceptionpointer argument must be a catchpad", CS); 4059 break; 4060 } 4061 case Intrinsic::masked_load: { 4062 Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS); 4063 4064 Value *Ptr = CS.getArgOperand(0); 4065 //Value *Alignment = CS.getArgOperand(1); 4066 Value *Mask = CS.getArgOperand(2); 4067 Value *PassThru = CS.getArgOperand(3); 4068 Assert(Mask->getType()->isVectorTy(), 4069 "masked_load: mask must be vector", CS); 4070 4071 // DataTy is the overloaded type 4072 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 4073 Assert(DataTy == CS.getType(), 4074 "masked_load: return must match pointer type", CS); 4075 Assert(PassThru->getType() == DataTy, 4076 "masked_load: pass through and data type must match", CS); 4077 Assert(Mask->getType()->getVectorNumElements() == 4078 DataTy->getVectorNumElements(), 4079 "masked_load: vector mask must be same length as data", CS); 4080 break; 4081 } 4082 case Intrinsic::masked_store: { 4083 Value *Val = CS.getArgOperand(0); 4084 Value *Ptr = CS.getArgOperand(1); 4085 //Value *Alignment = CS.getArgOperand(2); 4086 Value *Mask = CS.getArgOperand(3); 4087 Assert(Mask->getType()->isVectorTy(), 4088 "masked_store: mask must be vector", CS); 4089 4090 // DataTy is the overloaded type 4091 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); 4092 Assert(DataTy == Val->getType(), 4093 "masked_store: storee must match pointer type", CS); 4094 Assert(Mask->getType()->getVectorNumElements() == 4095 DataTy->getVectorNumElements(), 4096 "masked_store: vector mask must be same length as data", CS); 4097 break; 4098 } 4099 4100 case Intrinsic::experimental_guard: { 4101 Assert(CS.isCall(), "experimental_guard cannot be invoked", CS); 4102 Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, 4103 "experimental_guard must have exactly one " 4104 "\"deopt\" operand bundle"); 4105 break; 4106 } 4107 4108 case Intrinsic::experimental_deoptimize: { 4109 Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS); 4110 Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1, 4111 "experimental_deoptimize must have exactly one " 4112 "\"deopt\" operand bundle"); 4113 Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(), 4114 "experimental_deoptimize return type must match caller return type"); 4115 4116 if (CS.isCall()) { 4117 auto *DeoptCI = CS.getInstruction(); 4118 auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode()); 4119 Assert(RI, 4120 "calls to experimental_deoptimize must be followed by a return"); 4121 4122 if (!CS.getType()->isVoidTy() && RI) 4123 Assert(RI->getReturnValue() == 
DeoptCI, 4124 "calls to experimental_deoptimize must be followed by a return " 4125 "of the value computed by experimental_deoptimize"); 4126 } 4127 4128 break; 4129 } 4130 }; 4131 } 4132 4133 /// \brief Carefully grab the subprogram from a local scope. 4134 /// 4135 /// This carefully grabs the subprogram from a local scope, avoiding the 4136 /// built-in assertions that would typically fire. 4137 static DISubprogram *getSubprogram(Metadata *LocalScope) { 4138 if (!LocalScope) 4139 return nullptr; 4140 4141 if (auto *SP = dyn_cast<DISubprogram>(LocalScope)) 4142 return SP; 4143 4144 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope)) 4145 return getSubprogram(LB->getRawScope()); 4146 4147 // Just return null; broken scope chains are checked elsewhere. 4148 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope"); 4149 return nullptr; 4150 } 4151 4152 template <class DbgIntrinsicTy> 4153 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) { 4154 auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata(); 4155 AssertDI(isa<ValueAsMetadata>(MD) || 4156 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()), 4157 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD); 4158 AssertDI(isa<DILocalVariable>(DII.getRawVariable()), 4159 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII, 4160 DII.getRawVariable()); 4161 AssertDI(isa<DIExpression>(DII.getRawExpression()), 4162 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII, 4163 DII.getRawExpression()); 4164 4165 // Ignore broken !dbg attachments; they're checked elsewhere. 4166 if (MDNode *N = DII.getDebugLoc().getAsMDNode()) 4167 if (!isa<DILocation>(N)) 4168 return; 4169 4170 BasicBlock *BB = DII.getParent(); 4171 Function *F = BB ? BB->getParent() : nullptr; 4172 4173 // The scopes for variables and !dbg attachments must agree. 4174 DILocalVariable *Var = DII.getVariable(); 4175 DILocation *Loc = DII.getDebugLoc(); 4176 Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", 4177 &DII, BB, F); 4178 4179 DISubprogram *VarSP = getSubprogram(Var->getRawScope()); 4180 DISubprogram *LocSP = getSubprogram(Loc->getRawScope()); 4181 if (!VarSP || !LocSP) 4182 return; // Broken scope chains are checked elsewhere. 4183 4184 Assert(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind + 4185 " variable and !dbg attachment", 4186 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc, 4187 Loc->getScope()->getSubprogram()); 4188 } 4189 4190 static uint64_t getVariableSize(const DILocalVariable &V) { 4191 // Be careful of broken types (checked elsewhere). 4192 const Metadata *RawType = V.getRawType(); 4193 while (RawType) { 4194 // Try to get the size directly. 4195 if (auto *T = dyn_cast<DIType>(RawType)) 4196 if (uint64_t Size = T->getSizeInBits()) 4197 return Size; 4198 4199 if (auto *DT = dyn_cast<DIDerivedType>(RawType)) { 4200 // Look at the base type. 4201 RawType = DT->getRawBaseType(); 4202 continue; 4203 } 4204 4205 // Missing type or size. 4206 break; 4207 } 4208 4209 // Fail gracefully. 
4210 return 0; 4211 } 4212 4213 void Verifier::verifyBitPieceExpression(const DbgInfoIntrinsic &I) { 4214 DILocalVariable *V; 4215 DIExpression *E; 4216 if (auto *DVI = dyn_cast<DbgValueInst>(&I)) { 4217 V = dyn_cast_or_null<DILocalVariable>(DVI->getRawVariable()); 4218 E = dyn_cast_or_null<DIExpression>(DVI->getRawExpression()); 4219 } else { 4220 auto *DDI = cast<DbgDeclareInst>(&I); 4221 V = dyn_cast_or_null<DILocalVariable>(DDI->getRawVariable()); 4222 E = dyn_cast_or_null<DIExpression>(DDI->getRawExpression()); 4223 } 4224 4225 // We don't know whether this intrinsic verified correctly. 4226 if (!V || !E || !E->isValid()) 4227 return; 4228 4229 // Nothing to do if this isn't a bit piece expression. 4230 if (!E->isBitPiece()) 4231 return; 4232 4233 // The frontend helps out GDB by emitting the members of local anonymous 4234 // unions as artificial local variables with shared storage. When SROA splits 4235 // the storage for artificial local variables that are smaller than the entire 4236 // union, the overhang piece will be outside of the allotted space for the 4237 // variable and this check fails. 4238 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs. 4239 if (V->isArtificial()) 4240 return; 4241 4242 // If there's no size, the type is broken, but that should be checked 4243 // elsewhere. 4244 uint64_t VarSize = getVariableSize(*V); 4245 if (!VarSize) 4246 return; 4247 4248 unsigned PieceSize = E->getBitPieceSize(); 4249 unsigned PieceOffset = E->getBitPieceOffset(); 4250 Assert(PieceSize + PieceOffset <= VarSize, 4251 "piece is larger than or outside of variable", &I, V, E); 4252 Assert(PieceSize != VarSize, "piece covers entire variable", &I, V, E); 4253 } 4254 4255 void Verifier::verifyCompileUnits() { 4256 auto *CUs = M->getNamedMetadata("llvm.dbg.cu"); 4257 SmallPtrSet<const Metadata *, 2> Listed; 4258 if (CUs) 4259 Listed.insert(CUs->op_begin(), CUs->op_end()); 4260 Assert( 4261 std::all_of(CUVisited.begin(), CUVisited.end(), 4262 [&Listed](const Metadata *CU) { return Listed.count(CU); }), 4263 "All DICompileUnits must be listed in llvm.dbg.cu"); 4264 CUVisited.clear(); 4265 } 4266 4267 void Verifier::verifyDeoptimizeCallingConvs() { 4268 if (DeoptimizeDeclarations.empty()) 4269 return; 4270 4271 const Function *First = DeoptimizeDeclarations[0]; 4272 for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) { 4273 Assert(First->getCallingConv() == F->getCallingConv(), 4274 "All llvm.experimental.deoptimize declarations must have the same " 4275 "calling convention", 4276 First, F); 4277 } 4278 } 4279 4280 //===----------------------------------------------------------------------===// 4281 // Implement the public interfaces to this file... 4282 //===----------------------------------------------------------------------===// 4283 4284 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) { 4285 Function &F = const_cast<Function &>(f); 4286 4287 // Don't use a raw_null_ostream. Printing IR is expensive. 4288 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true); 4289 4290 // Note that this function's return value is inverted from what you would 4291 // expect of a function called "verify". 4292 return !V.verify(F); 4293 } 4294 4295 bool llvm::verifyModule(const Module &M, raw_ostream *OS, 4296 bool *BrokenDebugInfo) { 4297 // Don't use a raw_null_ostream. Printing IR is expensive. 
4298 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo); 4299 4300 bool Broken = false; 4301 for (const Function &F : M) 4302 Broken |= !V.verify(F); 4303 4304 Broken |= !V.verify(M); 4305 if (BrokenDebugInfo) 4306 *BrokenDebugInfo = V.hasBrokenDebugInfo(); 4307 // Note that this function's return value is inverted from what you would 4308 // expect of a function called "verify". 4309 return Broken; 4310 } 4311 4312 namespace { 4313 struct VerifierLegacyPass : public FunctionPass { 4314 static char ID; 4315 4316 Verifier V; 4317 bool FatalErrors = true; 4318 4319 VerifierLegacyPass() 4320 : FunctionPass(ID), 4321 V(&dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false) { 4322 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry()); 4323 } 4324 explicit VerifierLegacyPass(bool FatalErrors) 4325 : FunctionPass(ID), 4326 V(&dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false), 4327 FatalErrors(FatalErrors) { 4328 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry()); 4329 } 4330 4331 bool runOnFunction(Function &F) override { 4332 if (!V.verify(F) && FatalErrors) 4333 report_fatal_error("Broken function found, compilation aborted!"); 4334 4335 return false; 4336 } 4337 4338 bool doFinalization(Module &M) override { 4339 bool HasErrors = false; 4340 for (Function &F : M) 4341 if (F.isDeclaration()) 4342 HasErrors |= !V.verify(F); 4343 4344 HasErrors |= !V.verify(M); 4345 if (FatalErrors) { 4346 if (HasErrors) 4347 report_fatal_error("Broken module found, compilation aborted!"); 4348 assert(!V.hasBrokenDebugInfo() && "Module contains invalid debug info"); 4349 } 4350 4351 // Strip broken debug info. 4352 if (V.hasBrokenDebugInfo()) { 4353 DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M); 4354 M.getContext().diagnose(DiagInvalid); 4355 if (!StripDebugInfo(M)) 4356 report_fatal_error("Failed to strip malformed debug info"); 4357 } 4358 return false; 4359 } 4360 4361 void getAnalysisUsage(AnalysisUsage &AU) const override { 4362 AU.setPreservesAll(); 4363 } 4364 }; 4365 } 4366 4367 char VerifierLegacyPass::ID = 0; 4368 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false) 4369 4370 FunctionPass *llvm::createVerifierPass(bool FatalErrors) { 4371 return new VerifierLegacyPass(FatalErrors); 4372 } 4373 4374 char VerifierAnalysis::PassID; 4375 VerifierAnalysis::Result VerifierAnalysis::run(Module &M, 4376 ModuleAnalysisManager &) { 4377 Result Res; 4378 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken); 4379 return Res; 4380 } 4381 4382 VerifierAnalysis::Result VerifierAnalysis::run(Function &F, 4383 FunctionAnalysisManager &) { 4384 return { llvm::verifyFunction(F, &dbgs()), false }; 4385 } 4386 4387 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) { 4388 auto Res = AM.getResult<VerifierAnalysis>(M); 4389 if (FatalErrors) { 4390 if (Res.IRBroken) 4391 report_fatal_error("Broken module found, compilation aborted!"); 4392 assert(!Res.DebugInfoBroken && "Module contains invalid debug info"); 4393 } 4394 4395 // Strip broken debug info. 
4396 if (Res.DebugInfoBroken) { 4397 DiagnosticInfoIgnoringInvalidDebugMetadata DiagInvalid(M); 4398 M.getContext().diagnose(DiagInvalid); 4399 if (!StripDebugInfo(M)) 4400 report_fatal_error("Failed to strip malformed debug info"); 4401 } 4402 return PreservedAnalyses::all(); 4403 } 4404 4405 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 4406 auto res = AM.getResult<VerifierAnalysis>(F); 4407 if (res.IRBroken && FatalErrors) 4408 report_fatal_error("Broken function found, compilation aborted!"); 4409 4410 return PreservedAnalyses::all(); 4411 } 4412