//===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements whole program optimization of virtual calls in cases
// where we know (via !type metadata) that the list of callees is fixed. This
// includes the following:
// - Single implementation devirtualization: if a virtual call has a single
//   possible callee, replace all calls with a direct call to that callee.
// - Virtual constant propagation: if the virtual function's return type is an
//   integer <=64 bits and all possible callees are readnone, for each class
//   and each list of constant arguments: evaluate the function, store the
//   return value alongside the virtual table, and rewrite each virtual call
//   as a load from the virtual table.
// - Uniform return value optimization: if the conditions for virtual constant
//   propagation hold and each function returns the same constant value,
//   replace each virtual call with that constant.
// - Unique return value optimization for i1 return values: if the conditions
//   for virtual constant propagation hold and a single vtable's function
//   returns 0, or a single vtable's function returns 1, replace each virtual
//   call with a comparison of the vptr against that vtable's address.
//
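// As an illustrative sketch (the classes and values here are hypothetical,
// not drawn from any particular program), consider:
//
//   struct A     { virtual bool f() { return true; } };
//   struct B : A { bool f() override { return true; } };
//
// A call a->f() guarded by a type test for A's type identifier has possible
// callees {A::f, B::f}. Both bodies are readnone and return the same
// constant, so uniform return value optimization folds the call to true. If
// instead only B::f returned true, the unique return value optimization for
// i1 would rewrite the call as a comparison of a's vptr against B's vtable
// address.
//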
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/TypeMetadataUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/Local.h"

#include <set>

using namespace llvm;
using namespace wholeprogramdevirt;

#define DEBUG_TYPE "wholeprogramdevirt"

// Find the minimum offset that we may store a value of size Size bits at. If
// IsAfter is set, look for an offset after the object, otherwise look for an
// offset before the object.
uint64_t
wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
                                     bool IsAfter, uint64_t Size) {
  // Find a minimum offset taking into account only vtable sizes.
  uint64_t MinByte = 0;
  for (const VirtualCallTarget &Target : Targets) {
    if (IsAfter)
      MinByte = std::max(MinByte, Target.minAfterBytes());
    else
      MinByte = std::max(MinByte, Target.minBeforeBytes());
  }

  // Build a vector of arrays of bytes covering, for each target, a slice of
  // the used region (see AccumBitVector::BytesUsed in
  // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte.
  // Effectively, this aligns the used regions to start at MinByte.
  //
  // In this example, A, B and C are vtables, # is a byte already allocated
  // for a virtual function pointer, AAAA... (etc.) are the used regions for
  // the vtables and Offset(X) is the value computed for the Offset variable
  // below for X.
  //
  //                    Offset(A)
  //                    |       |
  //                            |MinByte
  // A: ################AAAAAAAA|AAAAAAAA
  // B: ########BBBBBBBBBBBBBBBB|BBBB
  // C: ########################|CCCCCCCCCCCCCCCC
  //            |   Offset(B)   |
  //
  // This code produces the slices of A, B and C that appear after the divider
  // at MinByte.
  std::vector<ArrayRef<uint8_t>> Used;
  for (const VirtualCallTarget &Target : Targets) {
    ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
                                       : Target.TM->Bits->Before.BytesUsed;
    uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
                              : MinByte - Target.minBeforeBytes();

    // Disregard used regions that are smaller than Offset. These are
    // effectively all-free regions that do not need to be checked.
    if (VTUsed.size() > Offset)
      Used.push_back(VTUsed.slice(Offset));
  }

  if (Size == 1) {
    // Find a free bit in each member of Used.
    for (unsigned I = 0;; ++I) {
      uint8_t BitsUsed = 0;
      for (auto &&B : Used)
        if (I < B.size())
          BitsUsed |= B[I];
      if (BitsUsed != 0xff)
        return (MinByte + I) * 8 +
               countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
    }
  } else {
    // Find a free (Size/8) byte region in each member of Used.
    // FIXME: see if alignment helps.
    for (unsigned I = 0;; ++I) {
      for (auto &&B : Used) {
        unsigned Byte = 0;
        while ((I + Byte) < B.size() && Byte < (Size / 8)) {
          if (B[I + Byte])
            goto NextI;
          ++Byte;
        }
      }
      return (MinByte + I) * 8;
    NextI:;
    }
  }
}

void wholeprogramdevirt::setBeforeReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  if (BitWidth == 1)
    OffsetByte = -(AllocBefore / 8 + 1);
  else
    OffsetByte = -((AllocBefore + 7) / 8 + (BitWidth + 7) / 8);
  OffsetBit = AllocBefore % 8;

  for (VirtualCallTarget &Target : Targets) {
    if (BitWidth == 1)
      Target.setBeforeBit(AllocBefore);
    else
      Target.setBeforeBytes(AllocBefore, (BitWidth + 7) / 8);
  }
}

void wholeprogramdevirt::setAfterReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  if (BitWidth == 1)
    OffsetByte = AllocAfter / 8;
  else
    OffsetByte = (AllocAfter + 7) / 8;
  OffsetBit = AllocAfter % 8;

  for (VirtualCallTarget &Target : Targets) {
    if (BitWidth == 1)
      Target.setAfterBit(AllocAfter);
    else
      Target.setAfterBytes(AllocAfter, (BitWidth + 7) / 8);
  }
}
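// As a worked example for the two functions above (allocation values chosen
// purely for illustration): with BitWidth == 1 and AllocBefore == 10,
// setBeforeReturnValues computes OffsetByte == -(10 / 8 + 1) == -2 and
// OffsetBit == 10 % 8 == 2, i.e. the return value occupies bit 2 of the byte
// two bytes before the vtable's address point. With BitWidth == 32 and
// AllocAfter == 48, setAfterReturnValues computes
// OffsetByte == (48 + 7) / 8 == 6, placing the 32-bit value six bytes past
// the address point.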
VirtualCallTarget::VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM)
    : Fn(Fn), TM(TM),
      IsBigEndian(Fn->getParent()->getDataLayout().isBigEndian()) {}

namespace {

// A slot in a set of virtual tables. The TypeID identifies the set of virtual
// tables, and the ByteOffset is the offset in bytes from the address point to
// the virtual function pointer.
struct VTableSlot {
  Metadata *TypeID;
  uint64_t ByteOffset;
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<VTableSlot> {
  static VTableSlot getEmptyKey() {
    return {DenseMapInfo<Metadata *>::getEmptyKey(),
            DenseMapInfo<uint64_t>::getEmptyKey()};
  }
  static VTableSlot getTombstoneKey() {
    return {DenseMapInfo<Metadata *>::getTombstoneKey(),
            DenseMapInfo<uint64_t>::getTombstoneKey()};
  }
  static unsigned getHashValue(const VTableSlot &I) {
    return DenseMapInfo<Metadata *>::getHashValue(I.TypeID) ^
           DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
  }
  static bool isEqual(const VTableSlot &LHS, const VTableSlot &RHS) {
    return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
  }
};

} // end namespace llvm

namespace {

// A virtual call site. VTable is the loaded virtual table pointer, and CS is
// the indirect virtual call.
struct VirtualCallSite {
  Value *VTable;
  CallSite CS;

  // If non-null, this field points to the associated unsafe use count stored
  // in the DevirtModule::NumUnsafeUsesForTypeTest map below. See the
  // description of that field for details.
  unsigned *NumUnsafeUses;

  void emitRemark() {
    Function *F = CS.getCaller();
    emitOptimizationRemark(F->getContext(), DEBUG_TYPE, *F,
                           CS.getInstruction()->getDebugLoc(),
                           "devirtualized call");
  }

  void replaceAndErase(Value *New) {
    emitRemark();
    CS->replaceAllUsesWith(New);
    if (auto II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      BranchInst::Create(II->getNormalDest(), CS.getInstruction());
      II->getUnwindDest()->removePredecessor(II->getParent());
    }
    CS->eraseFromParent();
    // This use is no longer unsafe.
    if (NumUnsafeUses)
      --*NumUnsafeUses;
  }
};

struct DevirtModule {
  Module &M;
  IntegerType *Int8Ty;
  PointerType *Int8PtrTy;
  IntegerType *Int32Ty;

  MapVector<VTableSlot, std::vector<VirtualCallSite>> CallSlots;

  // This map keeps track of the number of "unsafe" uses of a loaded function
  // pointer. The key is the associated llvm.type.test intrinsic call
  // generated by this pass. An unsafe use is one that calls the loaded
  // function pointer directly. Every time we eliminate an unsafe use (for
  // example, by devirtualizing it or by applying virtual constant
  // propagation), we decrement the value stored in this map. If a value
  // reaches zero, we can eliminate the type check by RAUWing the associated
  // llvm.type.test call with true.
  std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest;

  DevirtModule(Module &M)
      : M(M), Int8Ty(Type::getInt8Ty(M.getContext())),
        Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
        Int32Ty(Type::getInt32Ty(M.getContext())) {}

  void scanTypeTestUsers(Function *TypeTestFunc, Function *AssumeFunc);
  void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc);

  void buildTypeIdentifierMap(
      std::vector<VTableBits> &Bits,
      DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
  bool
  tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot,
                            const std::set<TypeMemberInfo> &TypeMemberInfos,
                            uint64_t ByteOffset);
  bool trySingleImplDevirt(ArrayRef<VirtualCallTarget> TargetsForSlot,
                           MutableArrayRef<VirtualCallSite> CallSites);
  bool tryEvaluateFunctionsWithArgs(
      MutableArrayRef<VirtualCallTarget> TargetsForSlot,
      ArrayRef<ConstantInt *> Args);
  bool tryUniformRetValOpt(IntegerType *RetType,
                           ArrayRef<VirtualCallTarget> TargetsForSlot,
                           MutableArrayRef<VirtualCallSite> CallSites);
  bool tryUniqueRetValOpt(unsigned BitWidth,
                          ArrayRef<VirtualCallTarget> TargetsForSlot,
                          MutableArrayRef<VirtualCallSite> CallSites);
  bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           ArrayRef<VirtualCallSite> CallSites);

  void rebuildGlobal(VTableBits &B);

  bool run();
};

struct WholeProgramDevirt : public ModulePass {
  static char ID;

  WholeProgramDevirt() : ModulePass(ID) {
    initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    return DevirtModule(M).run();
  }
};

} // end anonymous namespace

INITIALIZE_PASS(WholeProgramDevirt, "wholeprogramdevirt",
                "Whole program devirtualization", false, false)
char WholeProgramDevirt::ID = 0;

ModulePass *llvm::createWholeProgramDevirtPass() {
  return new WholeProgramDevirt;
}
PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
                                              ModuleAnalysisManager &) {
  if (!DevirtModule(M).run())
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

void DevirtModule::buildTypeIdentifierMap(
    std::vector<VTableBits> &Bits,
    DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
  DenseMap<GlobalVariable *, VTableBits *> GVToBits;
  Bits.reserve(M.getGlobalList().size());
  SmallVector<MDNode *, 2> Types;
  for (GlobalVariable &GV : M.globals()) {
    Types.clear();
    GV.getMetadata(LLVMContext::MD_type, Types);
    if (Types.empty())
      continue;

    VTableBits *&BitsPtr = GVToBits[&GV];
    if (!BitsPtr) {
      Bits.emplace_back();
      Bits.back().GV = &GV;
      Bits.back().ObjectSize =
          M.getDataLayout().getTypeAllocSize(GV.getInitializer()->getType());
      BitsPtr = &Bits.back();
    }

    for (MDNode *Type : Types) {
      auto TypeID = Type->getOperand(1).get();

      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();

      TypeIdMap[TypeID].insert({BitsPtr, Offset});
    }
  }
}

bool DevirtModule::tryFindVirtualCallTargets(
    std::vector<VirtualCallTarget> &TargetsForSlot,
    const std::set<TypeMemberInfo> &TypeMemberInfos, uint64_t ByteOffset) {
  for (const TypeMemberInfo &TM : TypeMemberInfos) {
    if (!TM.Bits->GV->isConstant())
      return false;

    auto Init = dyn_cast<ConstantArray>(TM.Bits->GV->getInitializer());
    if (!Init)
      return false;
    ArrayType *VTableTy = Init->getType();

    uint64_t ElemSize =
        M.getDataLayout().getTypeAllocSize(VTableTy->getElementType());
    uint64_t GlobalSlotOffset = TM.Offset + ByteOffset;
    if (GlobalSlotOffset % ElemSize != 0)
      return false;

    unsigned Op = GlobalSlotOffset / ElemSize;
    if (Op >= Init->getNumOperands())
      return false;

    auto Fn = dyn_cast<Function>(Init->getOperand(Op)->stripPointerCasts());
    if (!Fn)
      return false;

    // We can disregard __cxa_pure_virtual as a possible call target, as
    // calls to pure virtuals are UB.
    if (Fn->getName() == "__cxa_pure_virtual")
      continue;

    TargetsForSlot.push_back({Fn, &TM});
  }

  // Give up if we couldn't find any targets.
  return !TargetsForSlot.empty();
}

bool DevirtModule::trySingleImplDevirt(
    ArrayRef<VirtualCallTarget> TargetsForSlot,
    MutableArrayRef<VirtualCallSite> CallSites) {
  // See if the program contains a single implementation of this virtual
  // function.
  Function *TheFn = TargetsForSlot[0].Fn;
  for (auto &&Target : TargetsForSlot)
    if (TheFn != Target.Fn)
      return false;

  // If so, update each call site to call that implementation directly.
  for (auto &&VCallSite : CallSites) {
    VCallSite.emitRemark();
    VCallSite.CS.setCalledFunction(ConstantExpr::getBitCast(
        TheFn, VCallSite.CS.getCalledValue()->getType()));
    // This use is no longer unsafe.
    if (VCallSite.NumUnsafeUses)
      --*VCallSite.NumUnsafeUses;
  }
  return true;
}
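// For example (illustrative IR; @_ZN1A1fEv is a hypothetical mangled name):
// when A::f is the only implementation reachable from a slot,
// trySingleImplDevirt rewrites the indirect call
//
//   %result = call i32 %fptr(i8* %obj)
//
// to call the implementation directly (through a bitcast if the pointer
// types differ):
//
//   %result = call i32 @_ZN1A1fEv(i8* %obj)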
bool DevirtModule::tryEvaluateFunctionsWithArgs(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    ArrayRef<ConstantInt *> Args) {
  // Evaluate each function and store the result in each target's RetVal
  // field.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    if (Target.Fn->arg_size() != Args.size() + 1)
      return false;
    for (unsigned I = 0; I != Args.size(); ++I)
      if (Target.Fn->getFunctionType()->getParamType(I + 1) !=
          Args[I]->getType())
        return false;

    Evaluator Eval(M.getDataLayout(), nullptr);
    SmallVector<Constant *, 2> EvalArgs;
    EvalArgs.push_back(
        Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0)));
    EvalArgs.insert(EvalArgs.end(), Args.begin(), Args.end());
    Constant *RetVal;
    if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) ||
        !isa<ConstantInt>(RetVal))
      return false;
    Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
  }
  return true;
}

bool DevirtModule::tryUniformRetValOpt(
    IntegerType *RetType, ArrayRef<VirtualCallTarget> TargetsForSlot,
    MutableArrayRef<VirtualCallSite> CallSites) {
  // Uniform return value optimization. If all functions return the same
  // constant, replace all calls with that constant.
  uint64_t TheRetVal = TargetsForSlot[0].RetVal;
  for (const VirtualCallTarget &Target : TargetsForSlot)
    if (Target.RetVal != TheRetVal)
      return false;

  auto TheRetValConst = ConstantInt::get(RetType, TheRetVal);
  for (auto Call : CallSites)
    Call.replaceAndErase(TheRetValConst);
  return true;
}

bool DevirtModule::tryUniqueRetValOpt(
    unsigned BitWidth, ArrayRef<VirtualCallTarget> TargetsForSlot,
    MutableArrayRef<VirtualCallSite> CallSites) {
  // IsOne controls whether we look for a 0 or a 1.
  auto tryUniqueRetValOptFor = [&](bool IsOne) {
    const TypeMemberInfo *UniqueMember = nullptr;
    for (const VirtualCallTarget &Target : TargetsForSlot) {
      if (Target.RetVal == (IsOne ? 1 : 0)) {
        if (UniqueMember)
          return false;
        UniqueMember = Target.TM;
      }
    }

    // We should have found a unique member or bailed out by now. We already
    // checked for a uniform return value in tryUniformRetValOpt.
    assert(UniqueMember);

    // Replace each call with the comparison.
    for (auto &&Call : CallSites) {
      IRBuilder<> B(Call.CS.getInstruction());
      Value *OneAddr = B.CreateBitCast(UniqueMember->Bits->GV, Int8PtrTy);
      OneAddr = B.CreateConstGEP1_64(OneAddr, UniqueMember->Offset);
      Value *Cmp = B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
                                Call.VTable, OneAddr);
      Call.replaceAndErase(Cmp);
    }
    return true;
  };

  if (BitWidth == 1) {
    if (tryUniqueRetValOptFor(true))
      return true;
    if (tryUniqueRetValOptFor(false))
      return true;
  }
  return false;
}
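// For example (illustrative IR; the vtable name @_ZTV1B and the address
// point offset 16 are hypothetical): if B's implementation is the only one
// in the slot that returns true, tryUniqueRetValOpt rewrites
//
//   %result = call i1 %fptr(i8* %obj)
//
// as a comparison of the vptr against B's address point:
//
//   %result = icmp eq i8* %vtable, getelementptr (i8, i8* @_ZTV1B, i64 16)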
bool DevirtModule::tryVirtualConstProp(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    ArrayRef<VirtualCallSite> CallSites) {
  // This only works if the function returns an integer.
  auto RetType = dyn_cast<IntegerType>(TargetsForSlot[0].Fn->getReturnType());
  if (!RetType)
    return false;
  unsigned BitWidth = RetType->getBitWidth();
  if (BitWidth > 64)
    return false;

  // Make sure that each function does not access memory, takes at least one
  // argument, does not use its first argument (which we assume is 'this'),
  // and has the same return type.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    if (!Target.Fn->doesNotAccessMemory() || Target.Fn->arg_empty() ||
        !Target.Fn->arg_begin()->use_empty() ||
        Target.Fn->getReturnType() != RetType)
      return false;
  }

  // Group call sites by the list of constant arguments they pass.
  // The comparator ensures deterministic ordering.
  struct ByAPIntValue {
    bool operator()(const std::vector<ConstantInt *> &A,
                    const std::vector<ConstantInt *> &B) const {
      return std::lexicographical_compare(
          A.begin(), A.end(), B.begin(), B.end(),
          [](ConstantInt *AI, ConstantInt *BI) {
            return AI->getValue().ult(BI->getValue());
          });
    }
  };
  std::map<std::vector<ConstantInt *>, std::vector<VirtualCallSite>,
           ByAPIntValue>
      VCallSitesByConstantArg;
  for (auto &&VCallSite : CallSites) {
    std::vector<ConstantInt *> Args;
    if (VCallSite.CS.getType() != RetType)
      continue;
    for (auto &&Arg :
         make_range(VCallSite.CS.arg_begin() + 1, VCallSite.CS.arg_end())) {
      if (!isa<ConstantInt>(Arg))
        break;
      Args.push_back(cast<ConstantInt>(&Arg));
    }
    if (Args.size() + 1 != VCallSite.CS.arg_size())
      continue;

    VCallSitesByConstantArg[Args].push_back(VCallSite);
  }

  for (auto &&CSByConstantArg : VCallSitesByConstantArg) {
    if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first))
      continue;

    if (tryUniformRetValOpt(RetType, TargetsForSlot, CSByConstantArg.second))
      continue;

    if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second))
      continue;

    // Find an allocation offset in bits in all vtables associated with the
    // type.
    uint64_t AllocBefore =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
    uint64_t AllocAfter =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth);

    // Calculate the total amount of padding needed to store a value at both
    // ends of the object.
    uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0;
    for (auto &&Target : TargetsForSlot) {
      TotalPaddingBefore += std::max<int64_t>(
          (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0);
      TotalPaddingAfter += std::max<int64_t>(
          (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0);
    }

    // If the amount of padding is too large, give up.
    // FIXME: do something smarter here.
    if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128)
      continue;

    // Calculate the offset to the value as a (possibly negative) byte offset
    // and (if applicable) a bit offset, and store the values in the targets.
    int64_t OffsetByte;
    uint64_t OffsetBit;
    if (TotalPaddingBefore <= TotalPaddingAfter)
      setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte,
                            OffsetBit);
    else
      setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
                           OffsetBit);

    // Rewrite each call to a load from OffsetByte/OffsetBit.
    for (auto Call : CSByConstantArg.second) {
      IRBuilder<> B(Call.CS.getInstruction());
      Value *Addr = B.CreateConstGEP1_64(Call.VTable, OffsetByte);
      if (BitWidth == 1) {
        Value *Bits = B.CreateLoad(Addr);
        Value *Bit = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
        Value *BitsAndBit = B.CreateAnd(Bits, Bit);
        auto IsBitSet =
            B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
        Call.replaceAndErase(IsBitSet);
      } else {
        Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo());
        Value *Val = B.CreateLoad(RetType, ValAddr);
        Call.replaceAndErase(Val);
      }
    }
  }
  return true;
}
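// For example (illustrative IR for the BitWidth == 1 case above, assuming
// OffsetByte == -2 and OffsetBit == 3): a call
//
//   %result = call i1 %fptr(i8* %obj)
//
// is rewritten as a load from the byte stored alongside the vtable:
//
//   %addr   = getelementptr i8, i8* %vtable, i64 -2
//   %bits   = load i8, i8* %addr
//   %and    = and i8 %bits, 8        ; 8 == 1 << OffsetBit
//   %result = icmp ne i8 %and, 0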
void DevirtModule::rebuildGlobal(VTableBits &B) {
  if (B.Before.Bytes.empty() && B.After.Bytes.empty())
    return;

  // Align each byte array to pointer width.
  unsigned PointerSize = M.getDataLayout().getPointerSize();
  B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), PointerSize));
  B.After.Bytes.resize(alignTo(B.After.Bytes.size(), PointerSize));

  // Before was stored in reverse order; flip it now.
  for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I)
    std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]);

  // Build an anonymous global containing the before bytes, followed by the
  // original initializer, followed by the after bytes.
  auto NewInit = ConstantStruct::getAnon(
      {ConstantDataArray::get(M.getContext(), B.Before.Bytes),
       B.GV->getInitializer(),
       ConstantDataArray::get(M.getContext(), B.After.Bytes)});
  auto NewGV =
      new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(),
                         GlobalVariable::PrivateLinkage, NewInit, "", B.GV);
  NewGV->setSection(B.GV->getSection());
  NewGV->setComdat(B.GV->getComdat());

  // Copy the original vtable's metadata to the anonymous global, adjusting
  // offsets as required.
  NewGV->copyMetadata(B.GV, B.Before.Bytes.size());

  // Build an alias named after the original global, pointing at the second
  // element (the original initializer).
  auto Alias = GlobalAlias::create(
      B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "",
      ConstantExpr::getGetElementPtr(
          NewInit->getType(), NewGV,
          ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0),
                               ConstantInt::get(Int32Ty, 1)}),
      &M);
  Alias->setVisibility(B.GV->getVisibility());
  Alias->takeName(B.GV);

  B.GV->replaceAllUsesWith(Alias);
  B.GV->eraseFromParent();
}
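// For example (illustrative; assuming two before bytes and four after bytes,
// each array padded to a pointer width of 8): a vtable @vt is rebuilt as
//
//   @anon = private constant { [8 x i8], <vtable type>, [8 x i8] } { ... }
//   @vt = alias <vtable type>, getelementptr ({ ... }, @anon, i32 0, i32 1)
//
// so existing references to @vt still see the original vtable layout, while
// the propagated return values live in the surrounding byte arrays.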
void DevirtModule::scanTypeTestUsers(Function *TypeTestFunc,
                                     Function *AssumeFunc) {
  // Find all virtual calls via a virtual table pointer %p under an assumption
  // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p
  // points to a member of the type identifier %md. Group calls by (type ID,
  // offset) pair (effectively the identity of the virtual function) and store
  // to CallSlots.
  DenseSet<Value *> SeenPtrs;
  for (auto I = TypeTestFunc->use_begin(), E = TypeTestFunc->use_end();
       I != E;) {
    auto CI = dyn_cast<CallInst>(I->getUser());
    ++I;
    if (!CI)
      continue;

    // Search for virtual calls based on %p and add them to DevirtCalls.
    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<CallInst *, 1> Assumes;
    findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI);

    // If we found any, add them to CallSlots. Only do this if we haven't
    // seen the vtable pointer before, as it may have been CSE'd with pointers
    // from other call sites, and we don't want to process call sites multiple
    // times.
    if (!Assumes.empty()) {
      Metadata *TypeId =
          cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
      Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
      if (SeenPtrs.insert(Ptr).second) {
        for (DevirtCallSite Call : DevirtCalls) {
          CallSlots[{TypeId, Call.Offset}].push_back(
              {CI->getArgOperand(0), Call.CS, nullptr});
        }
      }
    }

    // We no longer need the assumes or the type test.
    for (auto Assume : Assumes)
      Assume->eraseFromParent();
    // We can't use RecursivelyDeleteTriviallyDeadInstructions here because we
    // may use the vtable argument later.
    if (CI->use_empty())
      CI->eraseFromParent();
  }
}
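// The pattern scanTypeTestUsers recognizes looks like this (illustrative IR;
// names and the slot offset are hypothetical):
//
//   %vtable = load i8*, i8** %vtableptr
//   %p = call i1 @llvm.type.test(i8* %vtable, metadata !"typeid")
//   call void @llvm.assume(i1 %p)
//   %fptrptr = getelementptr i8, i8* %vtable, i64 8
//   %fptr = load i8*, i8** <bitcast of %fptrptr>
//   %result = call i1 %fptr(i8* %obj)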
void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
  Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test);

  for (auto I = TypeCheckedLoadFunc->use_begin(),
            E = TypeCheckedLoadFunc->use_end();
       I != E;) {
    auto CI = dyn_cast<CallInst>(I->getUser());
    ++I;
    if (!CI)
      continue;

    Value *Ptr = CI->getArgOperand(0);
    Value *Offset = CI->getArgOperand(1);
    Value *TypeIdValue = CI->getArgOperand(2);
    Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata();

    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<Instruction *, 1> LoadedPtrs;
    SmallVector<Instruction *, 1> Preds;
    bool HasNonCallUses = false;
    findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
                                               HasNonCallUses, CI);

    // Start by generating "pessimistic" code that explicitly loads the
    // function pointer from the vtable and performs the type check. If
    // possible, we will eliminate the load and the type check later.

    // If possible, only generate the load at the point where it is used.
    // This helps avoid unnecessary spills.
    IRBuilder<> LoadB(
        (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI);
    Value *GEP = LoadB.CreateGEP(Int8Ty, Ptr, Offset);
    Value *GEPPtr =
        LoadB.CreateBitCast(GEP, PointerType::getUnqual(Int8PtrTy));
    Value *LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEPPtr);

    for (Instruction *LoadedPtr : LoadedPtrs) {
      LoadedPtr->replaceAllUsesWith(LoadedValue);
      LoadedPtr->eraseFromParent();
    }

    // Likewise for the type test.
    IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI);
    CallInst *TypeTestCall =
        CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue});

    for (Instruction *Pred : Preds) {
      Pred->replaceAllUsesWith(TypeTestCall);
      Pred->eraseFromParent();
    }

    // We have already erased any extractvalue instructions that refer to the
    // intrinsic call, but the intrinsic may have other non-extractvalue uses
    // (although this is unlikely). In that case, explicitly build a pair and
    // RAUW it.
    if (!CI->use_empty()) {
      Value *Pair = UndefValue::get(CI->getType());
      IRBuilder<> B(CI);
      Pair = B.CreateInsertValue(Pair, LoadedValue, {0});
      Pair = B.CreateInsertValue(Pair, TypeTestCall, {1});
      CI->replaceAllUsesWith(Pair);
    }

    // The number of unsafe uses is initially the number of uses.
    auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall];
    NumUnsafeUses = DevirtCalls.size();

    // If the function pointer has a non-call user, we cannot eliminate the
    // type check, as one of those users may eventually call the pointer.
    // Increment the unsafe use count to make sure it cannot reach zero.
    if (HasNonCallUses)
      ++NumUnsafeUses;
    for (DevirtCallSite Call : DevirtCalls) {
      CallSlots[{TypeId, Call.Offset}].push_back(
          {Ptr, Call.CS, &NumUnsafeUses});
    }

    CI->eraseFromParent();
  }
}
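// For example (illustrative IR): a checked load such as
//
//   %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtable, i32 8,
//                                                  metadata !"typeid")
//
// is expanded by the code above into a plain load plus a type test:
//
//   %gep = getelementptr i8, i8* %vtable, i32 8
//   %fptr = load i8*, i8** <bitcast of %gep>
//   %test = call i1 @llvm.type.test(i8* %vtable, metadata !"typeid")
//
// allowing the load and the check to be optimized independently later.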
bool DevirtModule::run() {
  Function *TypeTestFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::type_test));
  Function *TypeCheckedLoadFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
  Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));

  if ((!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc ||
       AssumeFunc->use_empty()) &&
      (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()))
    return false;

  if (TypeTestFunc && AssumeFunc)
    scanTypeTestUsers(TypeTestFunc, AssumeFunc);

  if (TypeCheckedLoadFunc)
    scanTypeCheckedLoadUsers(TypeCheckedLoadFunc);

  // Rebuild type metadata into a map for easy lookup.
  std::vector<VTableBits> Bits;
  DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap;
  buildTypeIdentifierMap(Bits, TypeIdMap);
  if (TypeIdMap.empty())
    return true;

  // For each (type, offset) pair:
  bool DidVirtualConstProp = false;
  for (auto &S : CallSlots) {
    // Search each of the members of the type identifier for the virtual
    // function implementation at offset S.first.ByteOffset, and add to
    // TargetsForSlot.
    std::vector<VirtualCallTarget> TargetsForSlot;
    if (!tryFindVirtualCallTargets(TargetsForSlot, TypeIdMap[S.first.TypeID],
                                   S.first.ByteOffset))
      continue;

    if (trySingleImplDevirt(TargetsForSlot, S.second))
      continue;

    DidVirtualConstProp |= tryVirtualConstProp(TargetsForSlot, S.second);
  }

  // If we were able to eliminate all unsafe uses for a type checked load,
  // eliminate the type test by replacing it with true.
  if (TypeCheckedLoadFunc) {
    auto True = ConstantInt::getTrue(M.getContext());
    for (auto &&U : NumUnsafeUsesForTypeTest) {
      if (U.second == 0) {
        U.first->replaceAllUsesWith(True);
        U.first->eraseFromParent();
      }
    }
  }

  // Rebuild each global we touched as part of virtual constant propagation to
  // include the before and after bytes.
  if (DidVirtualConstProp)
    for (VTableBits &B : Bits)
      rebuildGlobal(B);

  return true;
}