//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//      - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
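//
// As a rough illustration (assumed shape; the pass does not emit exactly this
// text), a 4-byte load
//
//   %v = load i32, i32* %p, align 4
//
// gets a run-time call on the i8*-cast address inserted in front of it:
//
//   call void @__tsan_read4(i8* %p.i8)
//   %v = load i32, i32* %p, align 4
//
// and, when entry/exit instrumentation applies, the function additionally
// calls __tsan_func_entry(<return address>) on entry and __tsan_func_exit()
// on every exit path.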
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool>  ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool>  ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool>  ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool>  ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool>  ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
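// For example (assuming the usual sanitizer driver plumbing), these knobs can
// be flipped from the command line with something like
//   clang -fsanitize=thread -mllvm -tsan-instrument-atomics=0 ...
// or, when running the pass directly, opt -tsan -tsan-instrument-memintrinsics=0.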

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

static const char *const kTsanModuleCtorName = "tsan.module_ctor";
static const char *const kTsanInitName = "__tsan_init";

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer() : FunctionPass(ID) {}
  StringRef getPassName() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<Instruction *> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in initializeCallbacks.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  Function *TsanIgnoreBegin;
  Function *TsanIgnoreEnd;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanUnalignedRead[kNumberOfAccessSizes];
  Function *TsanUnalignedWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
  Function *TsanCtorFunction;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS_BEGIN(
    ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)

StringRef ThreadSanitizer::getPassName() const { return "ThreadSanitizer"; }

void ThreadSanitizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  AttributeList Attr;
  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
                           Attribute::NoUnwind);
  // Initialize the callbacks.
  TsanFuncEntry = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", Attr, IRB.getVoidTy(), IRB.getInt8PtrTy()));
  TsanFuncExit = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy()));
  TsanIgnoreBegin = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_ignore_thread_begin", Attr, IRB.getVoidTy()));
  TsanIgnoreEnd = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_ignore_thread_end", Attr, IRB.getVoidTy()));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        ReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy()));

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        WriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy()));

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] =
        checkSanitizerInterfaceFunction(M.getOrInsertFunction(
            UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy()));

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] =
        checkSanitizerInterfaceFunction(M.getOrInsertFunction(
            UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy()));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(AtomicLoadName, Attr, Ty, PtrTy, OrdTy));

    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy));

    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkSanitizerInterfaceFunction(
          M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Attr, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy));
  }
  TsanVptrUpdate = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy()));
  TsanVptrLoad = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", Attr, IRB.getVoidTy(), IRB.getInt8PtrTy()));
  TsanAtomicThreadFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", Attr, IRB.getVoidTy(), OrdTy));
  TsanAtomicSignalFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", Attr, IRB.getVoidTy(), OrdTy));

  MemmoveFn = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IntptrTy));
  MemcpyFn = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IntptrTy));
  MemsetFn = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                            IRB.getInt32Ty(), IntptrTy));
}
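// For a 4-byte access (i == 2 in the loop above), the entry points resolved
// here end up being, for example, __tsan_read4 / __tsan_write4,
// __tsan_unaligned_read4 / __tsan_unaligned_write4, __tsan_atomic32_load /
// __tsan_atomic32_store, __tsan_atomic32_fetch_add and
// __tsan_atomic32_compare_exchange_val.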

bool ThreadSanitizer::doInitialization(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());
  std::tie(TsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{});

  appendToGlobalCtors(M, TsanCtorFunction, 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      StringRef SectionName = GV->getSection();
      // Check if the global is in the PGO counters section.
      auto OF = Triple(M->getTargetTriple()).getObjectFormat();
      if (SectionName.endswith(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }

    // Check if the global is private gcov data.
    if (GV->getName().startswith("__llvm_gcov") ||
        GV->getName().startswith("__llvm_gcda"))
      return false;
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }

  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
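//
// For instance (an illustrative source-level sketch):
//   ... = x;   // read of x
//   x = ...;   // write to x later in the same BB, no calls in between
// Only the write is instrumented; since the same location is written anyway,
// analyzing the earlier read adds nothing.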
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
    const DataLayout &DL) {
  SmallPtrSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      Value *Addr = Store->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
        continue;
      WriteTargets.insert(Addr);
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
        continue;
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  // TODO: Ask TTI whether synchronization scope is between threads.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (&F == TsanCtorFunction)
    return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetLibraryInfo *TLI =
      &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

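  // Functions carrying "sanitize_thread_no_checking_at_run_time" get no checks
  // of their own; if they still contain calls, bracket the body with
  // __tsan_ignore_thread_begin/__tsan_ignore_thread_end (see
  // InsertRuntimeIgnores above) so the run-time skips reports for accesses
  // made while executing inside them.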
  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();

  // swifterror memory addresses are mem2reg promoted by instruction selection.
  // As such they cannot have regular uses like an instrumentation function and
  // it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite
      ? cast<StoreInst>(I)->getAlignment()
      : cast<LoadInst>(I)->getAlignment();
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  Value *OnAccessFunc = nullptr;
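  // Select between the plain and the unaligned callbacks: an access whose
  // alignment is unspecified (0, i.e. ABI alignment), at least 8 bytes, or a
  // multiple of the access size is treated as aligned; anything else goes
  // through the __tsan_unaligned_* entry points.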
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
    OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  else
    OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}

static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case AtomicOrdering::NotAtomic:
      llvm_unreachable("unexpected atomic ordering!");
    case AtomicOrdering::Unordered:              LLVM_FALLTHROUGH;
    case AtomicOrdering::Monotonic:              v = 0; break;
    // Not specified yet:
    // case AtomicOrdering::Consume:                v = 1; break;
    case AtomicOrdering::Acquire:                v = 2; break;
    case AtomicOrdering::Release:                v = 3; break;
    case AtomicOrdering::AcquireRelease:         v = 4; break;
    case AtomicOrdering::SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}
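// The numbering above is intended to match the C++11 memory_order values
// (relaxed = 0, consume = 1, acquire = 2, release = 3, acq_rel = 4,
// seq_cst = 5), which is what the __tsan_atomic* run-time entry points expect
// as their ordering argument.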

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
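//
// Roughly (an illustrative sketch; the exact casts are added below):
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i1 false)
// becomes a plain libc call that the tsan run-time intercepts:
//   call i8* @memset(i8* %p, i32 0, i64 %n)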
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards.  For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

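// As a rough before/after sketch (not exact IR), an atomic load such as
//   %v = load atomic i32, i32* %p acquire, align 4
// is rewritten into a call into the run-time,
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)   ; 2 == acquire
// and atomic stores, RMW operations, cmpxchg and fences are handled similarly
// through the corresponding __tsan_atomic* entry points.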
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *CmpOperand =
      IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
      IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     CmpOperand,
                     NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
      IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSyncScopeID() == SyncScope::SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}
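// For example, a 32-bit access has TypeSize == 32, so TypeSize / 8 == 4 and
// countTrailingZeros(4) == 2; index 2 selects the 4-byte flavour of the
// callbacks (__tsan_read4, __tsan_atomic32_load, and so on) set up in
// initializeCallbacks.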