//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But it also brings the major issue:
/// msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
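///
/// For example, for an instruction C = A + B the instrumentation conceptually
/// computes (an illustrative sketch, not the literal IR; Sx/Ox denote the
/// shadow and origin of x):
///   Sc = Sa | Sb;              // shadow propagation
///   Oc = (Sb != 0) ? Ob : Oa;  // origin of the rightmost dirty argument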
///
/// Atomic handling.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.

//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "msan"

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
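// For illustration, on x86_64 (with the constants above) an application
// address Addr maps to:
//   Shadow = Addr & ~kShadowMask64             // clear bit 46
//   Origin = (Shadow + kOriginOffset64) & ~3   // 4-byte aligned slot
// e.g. Addr 0x7f1234005678 -> Shadow 0x3f1234005678 -> Origin 0x5f1234005678.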
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address operand of a
// load or store. Such bugs are very rare, since a load from a garbage address
// typically results in SEGV, but they still happen (e.g. only the lower bits
// of the address are garbage, or the access happens early at program startup
// where malloc-ed memory is more likely to be zeroed). As of 2012-08-28 this
// flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transition between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
       cl::desc("Do not wrap indirect calls with target in the same module"),
       cl::Hidden, cl::init(true));
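// These options are reachable from the clang driver via -mllvm, e.g.
// (illustrative):
//   clang -fsanitize=memory -mllvm -msan-keep-going=1 test.c
// Origin tracking also has a dedicated driver flag,
// -fsanitize-memory-track-origins.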
namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(nullptr),
        WarningFn(nullptr),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  GlobalVariable *MsandrModuleStart;
  GlobalVariable *MsandrModuleEnd;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
} // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
  return new MemorySanitizer(TrackOrigins);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}


/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), NULL);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), NULL);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
      "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IntptrTy, IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
      "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
      NULL);
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
  MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
      "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt32Ty(), IntptrTy, NULL);
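
  // For reference, the runtime is expected to provide functions matching the
  // declarations above; in C terms (an illustrative sketch derived from the
  // getOrInsertFunction calls, not a normative API listing):
  //   void  __msan_warning_noreturn(void);
  //   void  __msan_maybe_warning_4(uint32_t shadow, uint32_t origin);
  //   void *__msan_memcpy(void *dst, const void *src, uintptr_t n);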

  // Create globals.
  RetvalTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
      M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_retval_origin_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
      M, ArrayType::get(OriginTy, 1000), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
      nullptr, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
      M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_va_arg_overflow_size_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
      M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
        PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }

  if (WrapIndirectCalls && ClWrapIndirectCallsFast) {
    MsandrModuleStart = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        nullptr, "__executable_start");
    MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
    MsandrModuleEnd = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        nullptr, "_end");
    MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
  }
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init into the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  C = &(M.getContext());
  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
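  // Conceptually this is equivalent to compiling the module with
  //   __attribute__((constructor)) static void ctor() { __msan_init(); }
  // (an illustrative sketch; the pass appends to llvm.global_ctors directly).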
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                          "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}
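// For example: TypeSizeToSizeIndex(8) == 0 (a 1-byte access),
// TypeSizeToSizeIndex(32) == 2 and TypeSizeToSizeIndex(64) == 3, indexing
// the __msan_maybe_* callbacks for 1-, 4- and 8-byte accesses respectively.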

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  SmallVector<CallSite, 16> IndirectCallList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
          dbgs() << "MemorySanitizer is not inserting checks into '"
                 << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    if (isa<StructType>(Shadow->getType())) {
      IRB.CreateAlignedStore(updateOrigin(Origin, IRB),
                             getOriginPtr(Addr, IRB), Alignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      // TODO(eugenis): handle non-zero constant shadow by inserting an
      // unconditional check (cannot simply fail compilation as this could
      // be in the dead code).
      if (isa<Constant>(ConvertedShadow)) return;
      unsigned TypeSizeInBits =
          MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall3(Fn, ConvertedShadow2,
                        IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        Origin);
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
                                  getOriginPtr(Addr, IRBNew), Alignment);
      }
    }
  }
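
  // In the inline case above, the emitted code is roughly (an illustrative
  // sketch of the SplitBlockAndInsertIfThen expansion):
  //   if (ConvertedShadow != 0)             // unlikely, per OriginStoreWeights
  //     *OriginPtr = updateOrigin(Origin);  // 4-byte aligned origin slot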

  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
                    InstrumentWithCalls);
      }
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
    // See the comment in materializeStores().
    if (isa<Constant>(ConvertedShadow)) return;
    unsigned TypeSizeInBits =
        MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0));
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }
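
  // An inline check materialized above expands to IR along these lines
  // (sketch):
  //   %cmp = icmp ne i32 %shadow, 0
  //   br i1 %cmp, label %warn, label %cont   ; cold branch weights
  // warn:
  //   store i32 %origin, i32* @__msan_origin_tls  ; only with origin tracking
  //   call void @__msan_warning_noreturn()
  //   call void asm sideeffect "", ""()           ; prevents callback merging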

  void materializeIndirectCalls() {
    for (auto &CS : IndirectCallList) {
      Instruction *I = CS.getInstruction();
      BasicBlock *B = I->getParent();
      IRBuilder<> IRB(I);
      Value *Fn0 = CS.getCalledValue();
      Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);

      if (ClWrapIndirectCallsFast) {
        // Check that the call target is inside this module's limits.
        Value *Start =
            IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
        Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd,
                                       MS.AnyFunctionPtrTy);

        Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
                                              IRB.CreateICmpUGE(Fn, End));

        PHINode *NewFnPhi =
            IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");

        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            NotInThisModule, NewFnPhi,
            /* Unreachable */ false, MS.ColdCallWeights);

        IRB.SetInsertPoint(CheckTerm);
        // Slow path: call wrapper function to possibly transform the call
        // target.
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());

        NewFnPhi->addIncoming(Fn0, B);
        NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
        CS.setCalledFunction(NewFnPhi);
      } else {
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
        CS.setCalledFunction(NewFn);
      }
    }
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);


    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    // Wrap indirect calls.
    materializeIndirectCalls();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
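
  // Examples of the mapping: i32 -> i32, float -> i32, double -> i64,
  // <4 x float> -> <4 x i32>, {i64, i8} -> {i64, i8}.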

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
        IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd,
                              PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
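
  // For example, for a value of type <4 x i32> the clean shadow is
  // zeroinitializer and the poisoned shadow is a splat of -1; for a struct
  // type the poisoned shadow is built element by element.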

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (!PropagateShadow) return getCleanShadow(V);
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = FArg.hasByValAttr()
            ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
            : MS.DL->getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
                getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }
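
  // Example of the resulting layout: for f(i32 %a, double %b, i8 %c) the
  // argument shadows live at __msan_param_tls offsets 0, 8 and 16, since
  // every slot is rounded up to kShadowTLSAlignment (8) bytes.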

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (PropagateShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }
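
  // For example, an atomic load with Monotonic ordering is strengthened to
  // Acquire so that the shadow load emitted after it is ordered against the
  // clean shadow stored before the matching atomic store (see the atomic
  // handling notes in the file comment).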

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
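
  // Worked example: for X & 0xF0 the constant's shadow S2 is 0, so the
  // formula reduces to S = S1 & 0xF0 -- bits masked out by the constant are
  // known to be 0, and therefore defined, whatever X's shadow says.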

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

   public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
            Value *Cond =
                IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }
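
  // For example, casting a <4 x i16> shadow to an i32 shadow goes through the
  // scalar path above: bitcast <4 x i16> to i64, then trunc i64 to i32.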

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  // \brief Handle multiplication by constant.
  //
  // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes the corresponding number of
  // lower bits of the result zero as well. We model it by shifting the other
  // operand shadow left by the required number of bits. Effectively, we
  // transform (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as
  // (Sx << B). We use multiplication by 2**N instead of shift to cover the
  // case of multiplication by 0, which may occur in some elements of a vector
  // operand.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (Ty->isVectorTy()) {
      unsigned NumElements = Ty->getVectorNumElements();
      Type *EltTy = Ty->getSequentialElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        ConstantInt *Elt =
            dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
        APInt V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
        Elements.push_back(ConstantInt::get(EltTy, V2));
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
      APInt V = Elt->getValue();
      APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
      ShadowMul = ConstantInt::get(Elt->getType(), V2);
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }

  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }
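
  // Example: for X * 12 (12 = 0b1100, two trailing zeros) ShadowMul is 4, so
  // the result shadow is Sx * 4 == Sx << 2 -- the low two bits of the result
  // are always zero and hence always initialized.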
1485 A = IRB.CreatePointerCast(A, Sa->getType());
1486 B = IRB.CreatePointerCast(B, Sb->getType());
1487
1488 // A == B <==> (C = A^B) == 0
1489 // A != B <==> (C = A^B) != 0
1490 // Sc = Sa | Sb
1491 Value *C = IRB.CreateXor(A, B);
1492 Value *Sc = IRB.CreateOr(Sa, Sb);
1493 // Now dealing with i = (C == 0) comparison (or C != 0; it makes no difference)
1494 // Result is defined if one of the following is true
1495 // * there is a defined 1 bit in C
1496 // * C is fully defined
1497 // Si = !(C & ~Sc) && Sc
1498 Value *Zero = Constant::getNullValue(Sc->getType());
1499 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1500 Value *Si =
1501 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1502 IRB.CreateICmpEQ(
1503 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1504 Si->setName("_msprop_icmp");
1505 setShadow(&I, Si);
1506 setOriginForNaryOp(I);
1507 }
1508
1509 /// \brief Build the lowest possible value of V, taking into account V's
1510 /// uninitialized bits.
1511 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1512 bool isSigned) {
1513 if (isSigned) {
1514 // Split shadow into sign bit and other bits.
1515 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1516 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1517 // Set the undefined sign bit (most negative), clear other undefined bits.
1518 return
1519 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1520 } else {
1521 // Minimize undefined bits.
1522 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1523 }
1524 }
1525
1526 /// \brief Build the highest possible value of V, taking into account V's
1527 /// uninitialized bits.
1528 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1529 bool isSigned) {
1530 if (isSigned) {
1531 // Split shadow into sign bit and other bits.
1532 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1533 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1534 // Clear the undefined sign bit (non-negative), set other undefined bits.
1535 return
1536 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1537 } else {
1538 // Maximize undefined bits.
1539 return IRB.CreateOr(A, Sa);
1540 }
1541 }
1542
1543 /// \brief Instrument relational comparisons.
1544 ///
1545 /// This function does exact shadow propagation for all relational
1546 /// comparisons of integers, pointers and vectors of those.
1547 /// FIXME: output seems suboptimal when one of the operands is a constant
1548 void handleRelationalComparisonExact(ICmpInst &I) {
1549 IRBuilder<> IRB(&I);
1550 Value *A = I.getOperand(0);
1551 Value *B = I.getOperand(1);
1552 Value *Sa = getShadow(A);
1553 Value *Sb = getShadow(B);
1554
1555 // Get rid of pointers and vectors of pointers.
1556 // For ints (and vectors of ints), types of A and Sa match,
1557 // and this is a no-op.
1558 A = IRB.CreatePointerCast(A, Sa->getType());
1559 B = IRB.CreatePointerCast(B, Sb->getType());
1560
1561 // Let [a0, a1] be the interval of possible values of A, taking into account
1562 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1563 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
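// Worked example (values invented for this comment): take unsigned 4-bit
// operands where A = 01?? (app bits 0100, shadow Sa = 0011, so the two low
// bits are unknown) and B is the fully-initialized constant 0010 (Sb = 0).
// Then [a0, a1] = [0100, 0111] and [b0, b1] = [0010, 0010]. For A ugt B,
// (a0 ugt b1) and (a1 ugt b0) are both true, so S1 == S2 below and their
// xor is 0: the result is defined (true) no matter what the unknown bits
// turn out to be.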
1564 bool IsSigned = I.isSigned();
1565 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1566 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1567 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1568 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1569 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1570 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1571 Value *Si = IRB.CreateXor(S1, S2);
1572 setShadow(&I, Si);
1573 setOriginForNaryOp(I);
1574 }
1575
1576 /// \brief Instrument signed relational comparisons.
1577 ///
1578 /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
1579 /// propagating the highest bit of the shadow. Everything else is delegated
1580 /// to handleShadowOr().
1581 void handleSignedRelationalComparison(ICmpInst &I) {
1582 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1583 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1584 Value* op = nullptr;
1585 CmpInst::Predicate pre = I.getPredicate();
1586 if (constOp0 && constOp0->isNullValue() &&
1587 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
1588 op = I.getOperand(1);
1589 } else if (constOp1 && constOp1->isNullValue() &&
1590 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
1591 op = I.getOperand(0);
1592 }
1593 if (op) {
1594 IRBuilder<> IRB(&I);
1595 Value* Shadow =
1596 IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
1597 setShadow(&I, Shadow);
1598 setOrigin(&I, getOrigin(op));
1599 } else {
1600 handleShadowOr(I);
1601 }
1602 }
1603
1604 void visitICmpInst(ICmpInst &I) {
1605 if (!ClHandleICmp) {
1606 handleShadowOr(I);
1607 return;
1608 }
1609 if (I.isEquality()) {
1610 handleEqualityComparison(I);
1611 return;
1612 }
1613
1614 assert(I.isRelational());
1615 if (ClHandleICmpExact) {
1616 handleRelationalComparisonExact(I);
1617 return;
1618 }
1619 if (I.isSigned()) {
1620 handleSignedRelationalComparison(I);
1621 return;
1622 }
1623
1624 assert(I.isUnsigned());
1625 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1626 handleRelationalComparisonExact(I);
1627 return;
1628 }
1629
1630 handleShadowOr(I);
1631 }
1632
1633 void visitFCmpInst(FCmpInst &I) {
1634 handleShadowOr(I);
1635 }
1636
1637 void handleShift(BinaryOperator &I) {
1638 IRBuilder<> IRB(&I);
1639 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1640 // Otherwise perform the same shift on S1.
1641 Value *S1 = getShadow(&I, 0);
1642 Value *S2 = getShadow(&I, 1);
1643 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1644 S2->getType());
1645 Value *V2 = I.getOperand(1);
1646 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1647 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1648 setOriginForNaryOp(I);
1649 }
1650
1651 void visitShl(BinaryOperator &I) { handleShift(I); }
1652 void visitAShr(BinaryOperator &I) { handleShift(I); }
1653 void visitLShr(BinaryOperator &I) { handleShift(I); }
1654
1655 /// \brief Instrument llvm.memmove
1656 ///
1657 /// At this point we don't know if llvm.memmove will be inlined or not.
1658 /// If we don't instrument it and it gets inlined,
1659 /// our interceptor will not kick in and we will lose the memmove.
1660 /// If we instrument the call here, but it does not get inlined,
1661 /// we will memmove the shadow twice, which is bad in the case
1662 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1663 ///
1664 /// A similar situation exists for memcpy and memset.
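///
/// For example, a call to @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n,
/// ...) becomes a plain call to the runtime's MS.MemmoveFn (__msan_memmove),
/// which moves the application bytes together with the corresponding shadow
/// (and, with origin tracking enabled, origins) exactly once.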
1665 void visitMemMoveInst(MemMoveInst &I) {
1666 IRBuilder<> IRB(&I);
1667 IRB.CreateCall3(
1668 MS.MemmoveFn,
1669 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1670 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1671 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1672 I.eraseFromParent();
1673 }
1674
1675 // Similar to memmove: avoid copying shadow twice.
1676 // This is somewhat unfortunate as it may slow down small constant memcpys.
1677 // FIXME: consider doing manual inline for small constant sizes and proper
1678 // alignment.
1679 void visitMemCpyInst(MemCpyInst &I) {
1680 IRBuilder<> IRB(&I);
1681 IRB.CreateCall3(
1682 MS.MemcpyFn,
1683 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1684 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1685 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1686 I.eraseFromParent();
1687 }
1688
1689 // Same as memcpy.
1690 void visitMemSetInst(MemSetInst &I) {
1691 IRBuilder<> IRB(&I);
1692 IRB.CreateCall3(
1693 MS.MemsetFn,
1694 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1695 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1696 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1697 I.eraseFromParent();
1698 }
1699
1700 void visitVAStartInst(VAStartInst &I) {
1701 VAHelper->visitVAStartInst(I);
1702 }
1703
1704 void visitVACopyInst(VACopyInst &I) {
1705 VAHelper->visitVACopyInst(I);
1706 }
1707
1708 enum IntrinsicKind {
1709 IK_DoesNotAccessMemory,
1710 IK_OnlyReadsMemory,
1711 IK_WritesMemory
1712 };
1713
1714 static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
1715 const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
1716 const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
1717 const int OnlyReadsMemory = IK_OnlyReadsMemory;
1718 const int OnlyAccessesArgumentPointees = IK_WritesMemory;
1719 const int UnknownModRefBehavior = IK_WritesMemory;
1720 #define GET_INTRINSIC_MODREF_BEHAVIOR
1721 #define ModRefBehavior IntrinsicKind
1722 #include "llvm/IR/Intrinsics.gen"
1723 #undef ModRefBehavior
1724 #undef GET_INTRINSIC_MODREF_BEHAVIOR
1725 }
1726
1727 /// \brief Handle vector store-like intrinsics.
1728 ///
1729 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1730 /// has 1 pointer argument and 1 vector argument, returns void.
1731 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1732 IRBuilder<> IRB(&I);
1733 Value* Addr = I.getArgOperand(0);
1734 Value *Shadow = getShadow(&I, 1);
1735 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1736
1737 // We don't know the pointer alignment (could be unaligned SSE store!).
1738 // We have to assume the worst case.
1739 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1740
1741 if (ClCheckAccessAddress)
1742 insertShadowCheck(Addr, &I);
1743
1744 // FIXME: use ClStoreCleanOrigin
1745 // FIXME: factor out common code from materializeStores
1746 if (MS.TrackOrigins)
1747 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
1748 return true;
1749 }
1750
1751 /// \brief Handle vector load-like intrinsics.
1752 ///
1753 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1754 /// has 1 pointer argument, returns a vector.
1755 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1756 IRBuilder<> IRB(&I);
1757 Value *Addr = I.getArgOperand(0);
1758
1759 Type *ShadowTy = getShadowTy(&I);
1760 if (PropagateShadow) {
1761 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1762 // We don't know the pointer alignment (could be unaligned SSE load!).
1763 // We have to assume the worst case.
1764 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1765 } else {
1766 setShadow(&I, getCleanShadow(&I));
1767 }
1768
1769 if (ClCheckAccessAddress)
1770 insertShadowCheck(Addr, &I);
1771
1772 if (MS.TrackOrigins) {
1773 if (PropagateShadow)
1774 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
1775 else
1776 setOrigin(&I, getCleanOrigin());
1777 }
1778 return true;
1779 }
1780
1781 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1782 ///
1783 /// Instrument intrinsics with any number of arguments of the same type,
1784 /// equal to the return type. The type should be simple (no aggregates or
1785 /// pointers; vectors are fine).
1786 /// Caller guarantees that this intrinsic does not access memory.
1787 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1788 Type *RetTy = I.getType();
1789 if (!(RetTy->isIntOrIntVectorTy() ||
1790 RetTy->isFPOrFPVectorTy() ||
1791 RetTy->isX86_MMXTy()))
1792 return false;
1793
1794 unsigned NumArgOperands = I.getNumArgOperands();
1795
1796 for (unsigned i = 0; i < NumArgOperands; ++i) {
1797 Type *Ty = I.getArgOperand(i)->getType();
1798 if (Ty != RetTy)
1799 return false;
1800 }
1801
1802 IRBuilder<> IRB(&I);
1803 ShadowAndOriginCombiner SC(this, IRB);
1804 for (unsigned i = 0; i < NumArgOperands; ++i)
1805 SC.Add(I.getArgOperand(i));
1806 SC.Done(&I);
1807
1808 return true;
1809 }
1810
1811 /// \brief Heuristically instrument unknown intrinsics.
1812 ///
1813 /// The main purpose of this code is to do something reasonable with all
1814 /// random intrinsics we might encounter, most importantly SIMD intrinsics.
1815 /// We recognize several classes of intrinsics by their argument types and
1816 /// ModRefBehavior and apply special instrumentation when we are reasonably
1817 /// sure that we know what the intrinsic does.
1818 ///
1819 /// We special-case intrinsics where this approach fails. See llvm.bswap
1820 /// handling as an example of that.
1821 bool handleUnknownIntrinsic(IntrinsicInst &I) {
1822 unsigned NumArgOperands = I.getNumArgOperands();
1823 if (NumArgOperands == 0)
1824 return false;
1825
1826 Intrinsic::ID iid = I.getIntrinsicID();
1827 IntrinsicKind IK = getIntrinsicKind(iid);
1828 bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
1829 bool WritesMemory = IK == IK_WritesMemory;
1830 assert(!(OnlyReadsMemory && WritesMemory));
1831
1832 if (NumArgOperands == 2 &&
1833 I.getArgOperand(0)->getType()->isPointerTy() &&
1834 I.getArgOperand(1)->getType()->isVectorTy() &&
1835 I.getType()->isVoidTy() &&
1836 WritesMemory) {
1837 // This looks like a vector store.
1838 return handleVectorStoreIntrinsic(I);
1839 }
1840
1841 if (NumArgOperands == 1 &&
1842 I.getArgOperand(0)->getType()->isPointerTy() &&
1843 I.getType()->isVectorTy() &&
1844 OnlyReadsMemory) {
1845 // This looks like a vector load.
1846 return handleVectorLoadIntrinsic(I);
1847 }
1848
1849 if (!OnlyReadsMemory && !WritesMemory)
1850 if (maybeHandleSimpleNomemIntrinsic(I))
1851 return true;
1852
1853 // FIXME: detect and handle SSE maskstore/maskload
1854 return false;
1855 }
1856
1857 void handleBswap(IntrinsicInst &I) {
1858 IRBuilder<> IRB(&I);
1859 Value *Op = I.getArgOperand(0);
1860 Type *OpType = Op->getType();
1861 Function *BswapFunc = Intrinsic::getDeclaration(
1862 F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
1863 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
1864 setOrigin(&I, getOrigin(Op));
1865 }
1866
1867 // \brief Instrument vector convert intrinsic.
1868 //
1869 // This function instruments intrinsics like cvtsi2ss:
1870 // %Out = int_xxx_cvtyyy(%ConvertOp)
1871 // or
1872 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
1873 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
1874 // number of \p Out elements, and (if it has 2 arguments) copies the rest of
1875 // the elements from \p CopyOp.
1876 // In most cases conversion involves a floating-point value, which may trigger
1877 // a hardware exception when not fully initialized. For this reason we require
1878 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
1879 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
1880 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
1881 // return a fully initialized value.
1882 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
1883 IRBuilder<> IRB(&I);
1884 Value *CopyOp, *ConvertOp;
1885
1886 switch (I.getNumArgOperands()) {
1887 case 2:
1888 CopyOp = I.getArgOperand(0);
1889 ConvertOp = I.getArgOperand(1);
1890 break;
1891 case 1:
1892 ConvertOp = I.getArgOperand(0);
1893 CopyOp = nullptr;
1894 break;
1895 default:
1896 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
1897 }
1898
1899 // The first *NumUsedElements* elements of ConvertOp are converted to the
1900 // same number of output elements. The rest of the output is copied from
1901 // CopyOp, or (if not available) filled with zeroes.
1902 // Combine shadow for elements of ConvertOp that are used in this operation,
1903 // and insert a check.
1904 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
1905 // int->any conversion.
1906 Value *ConvertShadow = getShadow(ConvertOp);
1907 Value *AggShadow = nullptr;
1908 if (ConvertOp->getType()->isVectorTy()) {
1909 AggShadow = IRB.CreateExtractElement(
1910 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
1911 for (int i = 1; i < NumUsedElements; ++i) {
1912 Value *MoreShadow = IRB.CreateExtractElement(
1913 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
1914 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
1915 }
1916 } else {
1917 AggShadow = ConvertShadow;
1918 }
1919 assert(AggShadow->getType()->isIntegerTy());
1920 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
1921
1922 // Build result shadow by zero-filling parts of CopyOp shadow that come from
1923 // ConvertOp.
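// E.g. with NumUsedElements == 1 (the cvtsd2ss-style case): element 0 of the
// result shadow is zeroed below, because the converted element was already
// checked above, while the remaining elements keep CopyOp's shadow unchanged.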
1924 if (CopyOp) {
1925 assert(CopyOp->getType() == I.getType());
1926 assert(CopyOp->getType()->isVectorTy());
1927 Value *ResultShadow = getShadow(CopyOp);
1928 Type *EltTy = ResultShadow->getType()->getVectorElementType();
1929 for (int i = 0; i < NumUsedElements; ++i) {
1930 ResultShadow = IRB.CreateInsertElement(
1931 ResultShadow, ConstantInt::getNullValue(EltTy),
1932 ConstantInt::get(IRB.getInt32Ty(), i));
1933 }
1934 setShadow(&I, ResultShadow);
1935 setOrigin(&I, getOrigin(CopyOp));
1936 } else {
1937 setShadow(&I, getCleanShadow(&I));
1938 }
1939 }
1940
1941 // Given a scalar or vector, extract the lower 64 bits (or less), and return
1942 // all zeroes if it is zero, and all ones otherwise.
1943 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
1944 if (S->getType()->isVectorTy())
1945 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
1946 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
1947 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
1948 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
1949 }
1950
1951 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
1952 Type *T = S->getType();
1953 assert(T->isVectorTy());
1954 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
1955 return IRB.CreateSExt(S2, T);
1956 }
1957
1958 // \brief Instrument vector shift intrinsic.
1959 //
1960 // This function instruments intrinsics like int_x86_avx2_psll_w.
1961 // Intrinsic shifts %In by %ShiftSize bits.
1962 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
1963 // size, and the rest is ignored. Behavior is defined even if shift size is
1964 // greater than register (or field) width.
1965 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
1966 assert(I.getNumArgOperands() == 2);
1967 IRBuilder<> IRB(&I);
1968 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1969 // Otherwise perform the same shift on S1.
1970 Value *S1 = getShadow(&I, 0);
1971 Value *S2 = getShadow(&I, 1);
1972 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
1973 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
1974 Value *V1 = I.getOperand(0);
1975 Value *V2 = I.getOperand(1);
1976 Value *Shift = IRB.CreateCall2(I.getCalledValue(),
1977 IRB.CreateBitCast(S1, V1->getType()), V2);
1978 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
1979 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1980 setOriginForNaryOp(I);
1981 }
1982
1983 // \brief Get an X86_MMX-sized vector type.
1984 Type *getMMXVectorTy(unsigned EltSizeInBits) {
1985 const unsigned X86_MMXSizeInBits = 64;
1986 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
1987 X86_MMXSizeInBits / EltSizeInBits);
1988 }
1989
1990 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
1991 // intrinsic.
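// (The signed and unsigned pack variants saturate differently, but they read
// the same input lanes and produce the same output lane layout, which is all
// that matters for shadow propagation; hence one signed variant per element
// width is enough.)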
1992 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
1993 switch (id) {
1994 case llvm::Intrinsic::x86_sse2_packsswb_128:
1995 case llvm::Intrinsic::x86_sse2_packuswb_128:
1996 return llvm::Intrinsic::x86_sse2_packsswb_128;
1997
1998 case llvm::Intrinsic::x86_sse2_packssdw_128:
1999 case llvm::Intrinsic::x86_sse41_packusdw:
2000 return llvm::Intrinsic::x86_sse2_packssdw_128;
2001
2002 case llvm::Intrinsic::x86_avx2_packsswb:
2003 case llvm::Intrinsic::x86_avx2_packuswb:
2004 return llvm::Intrinsic::x86_avx2_packsswb;
2005
2006 case llvm::Intrinsic::x86_avx2_packssdw:
2007 case llvm::Intrinsic::x86_avx2_packusdw:
2008 return llvm::Intrinsic::x86_avx2_packssdw;
2009
2010 case llvm::Intrinsic::x86_mmx_packsswb:
2011 case llvm::Intrinsic::x86_mmx_packuswb:
2012 return llvm::Intrinsic::x86_mmx_packsswb;
2013
2014 case llvm::Intrinsic::x86_mmx_packssdw:
2015 return llvm::Intrinsic::x86_mmx_packssdw;
2016 default:
2017 llvm_unreachable("unexpected intrinsic id");
2018 }
2019 }
2020
2021 // \brief Instrument vector pack intrinsic.
2022 //
2023 // This function instruments intrinsics like x86_mmx_packsswb, that
2024 // pack elements of 2 input vectors into half as many bits with saturation.
2025 // Shadow is propagated with the signed variant of the same intrinsic applied
2026 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2027 // EltSizeInBits is used only for x86mmx arguments.
2028 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2029 assert(I.getNumArgOperands() == 2);
2030 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2031 IRBuilder<> IRB(&I);
2032 Value *S1 = getShadow(&I, 0);
2033 Value *S2 = getShadow(&I, 1);
2034 assert(isX86_MMX || S1->getType()->isVectorTy());
2035
2036 // SExt and ICmpNE below must apply to individual elements of input vectors.
2037 // In case of x86mmx arguments, cast them to appropriate vector types and
2038 // back.
2039 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2040 if (isX86_MMX) {
2041 S1 = IRB.CreateBitCast(S1, T);
2042 S2 = IRB.CreateBitCast(S2, T);
2043 }
2044 Value *S1_ext = IRB.CreateSExt(
2045 IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2046 Value *S2_ext = IRB.CreateSExt(
2047 IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2048 if (isX86_MMX) {
2049 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2050 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2051 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2052 }
2053
2054 Function *ShadowFn = Intrinsic::getDeclaration(
2055 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2056
2057 Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
2058 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2059 setShadow(&I, S);
2060 setOriginForNaryOp(I);
2061 }
2062
2063 // \brief Instrument sum-of-absolute-differences intrinsic.
2064 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2065 const unsigned SignificantBitsPerResultElement = 16;
2066 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2067 Type *ResTy = isX86_MMX ?
IntegerType::get(*MS.C, 64) : I.getType(); 2068 unsigned ZeroBitsPerResultElement = 2069 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement; 2070 2071 IRBuilder<> IRB(&I); 2072 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1)); 2073 S = IRB.CreateBitCast(S, ResTy); 2074 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)), 2075 ResTy); 2076 S = IRB.CreateLShr(S, ZeroBitsPerResultElement); 2077 S = IRB.CreateBitCast(S, getShadowTy(&I)); 2078 setShadow(&I, S); 2079 setOriginForNaryOp(I); 2080 } 2081 2082 // \brief Instrument multiply-add intrinsic. 2083 void handleVectorPmaddIntrinsic(IntrinsicInst &I, 2084 unsigned EltSizeInBits = 0) { 2085 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy(); 2086 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType(); 2087 IRBuilder<> IRB(&I); 2088 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1)); 2089 S = IRB.CreateBitCast(S, ResTy); 2090 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)), 2091 ResTy); 2092 S = IRB.CreateBitCast(S, getShadowTy(&I)); 2093 setShadow(&I, S); 2094 setOriginForNaryOp(I); 2095 } 2096 2097 void visitIntrinsicInst(IntrinsicInst &I) { 2098 switch (I.getIntrinsicID()) { 2099 case llvm::Intrinsic::bswap: 2100 handleBswap(I); 2101 break; 2102 case llvm::Intrinsic::x86_avx512_cvtsd2usi64: 2103 case llvm::Intrinsic::x86_avx512_cvtsd2usi: 2104 case llvm::Intrinsic::x86_avx512_cvtss2usi64: 2105 case llvm::Intrinsic::x86_avx512_cvtss2usi: 2106 case llvm::Intrinsic::x86_avx512_cvttss2usi64: 2107 case llvm::Intrinsic::x86_avx512_cvttss2usi: 2108 case llvm::Intrinsic::x86_avx512_cvttsd2usi64: 2109 case llvm::Intrinsic::x86_avx512_cvttsd2usi: 2110 case llvm::Intrinsic::x86_avx512_cvtusi2sd: 2111 case llvm::Intrinsic::x86_avx512_cvtusi2ss: 2112 case llvm::Intrinsic::x86_avx512_cvtusi642sd: 2113 case llvm::Intrinsic::x86_avx512_cvtusi642ss: 2114 case llvm::Intrinsic::x86_sse2_cvtsd2si64: 2115 case llvm::Intrinsic::x86_sse2_cvtsd2si: 2116 case llvm::Intrinsic::x86_sse2_cvtsd2ss: 2117 case llvm::Intrinsic::x86_sse2_cvtsi2sd: 2118 case llvm::Intrinsic::x86_sse2_cvtsi642sd: 2119 case llvm::Intrinsic::x86_sse2_cvtss2sd: 2120 case llvm::Intrinsic::x86_sse2_cvttsd2si64: 2121 case llvm::Intrinsic::x86_sse2_cvttsd2si: 2122 case llvm::Intrinsic::x86_sse_cvtsi2ss: 2123 case llvm::Intrinsic::x86_sse_cvtsi642ss: 2124 case llvm::Intrinsic::x86_sse_cvtss2si64: 2125 case llvm::Intrinsic::x86_sse_cvtss2si: 2126 case llvm::Intrinsic::x86_sse_cvttss2si64: 2127 case llvm::Intrinsic::x86_sse_cvttss2si: 2128 handleVectorConvertIntrinsic(I, 1); 2129 break; 2130 case llvm::Intrinsic::x86_sse2_cvtdq2pd: 2131 case llvm::Intrinsic::x86_sse2_cvtps2pd: 2132 case llvm::Intrinsic::x86_sse_cvtps2pi: 2133 case llvm::Intrinsic::x86_sse_cvttps2pi: 2134 handleVectorConvertIntrinsic(I, 2); 2135 break; 2136 case llvm::Intrinsic::x86_avx512_psll_dq: 2137 case llvm::Intrinsic::x86_avx512_psrl_dq: 2138 case llvm::Intrinsic::x86_avx2_psll_w: 2139 case llvm::Intrinsic::x86_avx2_psll_d: 2140 case llvm::Intrinsic::x86_avx2_psll_q: 2141 case llvm::Intrinsic::x86_avx2_pslli_w: 2142 case llvm::Intrinsic::x86_avx2_pslli_d: 2143 case llvm::Intrinsic::x86_avx2_pslli_q: 2144 case llvm::Intrinsic::x86_avx2_psll_dq: 2145 case llvm::Intrinsic::x86_avx2_psrl_w: 2146 case llvm::Intrinsic::x86_avx2_psrl_d: 2147 case llvm::Intrinsic::x86_avx2_psrl_q: 2148 case llvm::Intrinsic::x86_avx2_psra_w: 2149 case llvm::Intrinsic::x86_avx2_psra_d: 2150 case llvm::Intrinsic::x86_avx2_psrli_w: 2151 case 
llvm::Intrinsic::x86_avx2_psrli_d: 2152 case llvm::Intrinsic::x86_avx2_psrli_q: 2153 case llvm::Intrinsic::x86_avx2_psrai_w: 2154 case llvm::Intrinsic::x86_avx2_psrai_d: 2155 case llvm::Intrinsic::x86_avx2_psrl_dq: 2156 case llvm::Intrinsic::x86_sse2_psll_w: 2157 case llvm::Intrinsic::x86_sse2_psll_d: 2158 case llvm::Intrinsic::x86_sse2_psll_q: 2159 case llvm::Intrinsic::x86_sse2_pslli_w: 2160 case llvm::Intrinsic::x86_sse2_pslli_d: 2161 case llvm::Intrinsic::x86_sse2_pslli_q: 2162 case llvm::Intrinsic::x86_sse2_psll_dq: 2163 case llvm::Intrinsic::x86_sse2_psrl_w: 2164 case llvm::Intrinsic::x86_sse2_psrl_d: 2165 case llvm::Intrinsic::x86_sse2_psrl_q: 2166 case llvm::Intrinsic::x86_sse2_psra_w: 2167 case llvm::Intrinsic::x86_sse2_psra_d: 2168 case llvm::Intrinsic::x86_sse2_psrli_w: 2169 case llvm::Intrinsic::x86_sse2_psrli_d: 2170 case llvm::Intrinsic::x86_sse2_psrli_q: 2171 case llvm::Intrinsic::x86_sse2_psrai_w: 2172 case llvm::Intrinsic::x86_sse2_psrai_d: 2173 case llvm::Intrinsic::x86_sse2_psrl_dq: 2174 case llvm::Intrinsic::x86_mmx_psll_w: 2175 case llvm::Intrinsic::x86_mmx_psll_d: 2176 case llvm::Intrinsic::x86_mmx_psll_q: 2177 case llvm::Intrinsic::x86_mmx_pslli_w: 2178 case llvm::Intrinsic::x86_mmx_pslli_d: 2179 case llvm::Intrinsic::x86_mmx_pslli_q: 2180 case llvm::Intrinsic::x86_mmx_psrl_w: 2181 case llvm::Intrinsic::x86_mmx_psrl_d: 2182 case llvm::Intrinsic::x86_mmx_psrl_q: 2183 case llvm::Intrinsic::x86_mmx_psra_w: 2184 case llvm::Intrinsic::x86_mmx_psra_d: 2185 case llvm::Intrinsic::x86_mmx_psrli_w: 2186 case llvm::Intrinsic::x86_mmx_psrli_d: 2187 case llvm::Intrinsic::x86_mmx_psrli_q: 2188 case llvm::Intrinsic::x86_mmx_psrai_w: 2189 case llvm::Intrinsic::x86_mmx_psrai_d: 2190 handleVectorShiftIntrinsic(I, /* Variable */ false); 2191 break; 2192 case llvm::Intrinsic::x86_avx2_psllv_d: 2193 case llvm::Intrinsic::x86_avx2_psllv_d_256: 2194 case llvm::Intrinsic::x86_avx2_psllv_q: 2195 case llvm::Intrinsic::x86_avx2_psllv_q_256: 2196 case llvm::Intrinsic::x86_avx2_psrlv_d: 2197 case llvm::Intrinsic::x86_avx2_psrlv_d_256: 2198 case llvm::Intrinsic::x86_avx2_psrlv_q: 2199 case llvm::Intrinsic::x86_avx2_psrlv_q_256: 2200 case llvm::Intrinsic::x86_avx2_psrav_d: 2201 case llvm::Intrinsic::x86_avx2_psrav_d_256: 2202 handleVectorShiftIntrinsic(I, /* Variable */ true); 2203 break; 2204 2205 // Byte shifts are not implemented. 
2206 // case llvm::Intrinsic::x86_avx512_psll_dq_bs: 2207 // case llvm::Intrinsic::x86_avx512_psrl_dq_bs: 2208 // case llvm::Intrinsic::x86_avx2_psll_dq_bs: 2209 // case llvm::Intrinsic::x86_avx2_psrl_dq_bs: 2210 // case llvm::Intrinsic::x86_sse2_psll_dq_bs: 2211 // case llvm::Intrinsic::x86_sse2_psrl_dq_bs: 2212 2213 case llvm::Intrinsic::x86_sse2_packsswb_128: 2214 case llvm::Intrinsic::x86_sse2_packssdw_128: 2215 case llvm::Intrinsic::x86_sse2_packuswb_128: 2216 case llvm::Intrinsic::x86_sse41_packusdw: 2217 case llvm::Intrinsic::x86_avx2_packsswb: 2218 case llvm::Intrinsic::x86_avx2_packssdw: 2219 case llvm::Intrinsic::x86_avx2_packuswb: 2220 case llvm::Intrinsic::x86_avx2_packusdw: 2221 handleVectorPackIntrinsic(I); 2222 break; 2223 2224 case llvm::Intrinsic::x86_mmx_packsswb: 2225 case llvm::Intrinsic::x86_mmx_packuswb: 2226 handleVectorPackIntrinsic(I, 16); 2227 break; 2228 2229 case llvm::Intrinsic::x86_mmx_packssdw: 2230 handleVectorPackIntrinsic(I, 32); 2231 break; 2232 2233 case llvm::Intrinsic::x86_mmx_psad_bw: 2234 case llvm::Intrinsic::x86_sse2_psad_bw: 2235 case llvm::Intrinsic::x86_avx2_psad_bw: 2236 handleVectorSadIntrinsic(I); 2237 break; 2238 2239 case llvm::Intrinsic::x86_sse2_pmadd_wd: 2240 case llvm::Intrinsic::x86_avx2_pmadd_wd: 2241 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128: 2242 case llvm::Intrinsic::x86_avx2_pmadd_ub_sw: 2243 handleVectorPmaddIntrinsic(I); 2244 break; 2245 2246 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw: 2247 handleVectorPmaddIntrinsic(I, 8); 2248 break; 2249 2250 case llvm::Intrinsic::x86_mmx_pmadd_wd: 2251 handleVectorPmaddIntrinsic(I, 16); 2252 break; 2253 2254 default: 2255 if (!handleUnknownIntrinsic(I)) 2256 visitInstruction(I); 2257 break; 2258 } 2259 } 2260 2261 void visitCallSite(CallSite CS) { 2262 Instruction &I = *CS.getInstruction(); 2263 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite"); 2264 if (CS.isCall()) { 2265 CallInst *Call = cast<CallInst>(&I); 2266 2267 // For inline asm, do the usual thing: check argument shadow and mark all 2268 // outputs as clean. Note that any side effects of the inline asm that are 2269 // not immediately visible in its constraints are not handled. 2270 if (Call->isInlineAsm()) { 2271 visitInstruction(I); 2272 return; 2273 } 2274 2275 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere"); 2276 2277 // We are going to insert code that relies on the fact that the callee 2278 // will become a non-readonly function after it is instrumented by us. To 2279 // prevent this code from being optimized out, mark that function 2280 // non-readonly in advance. 2281 if (Function *Func = Call->getCalledFunction()) { 2282 // Clear out readonly/readnone attributes. 
2283 AttrBuilder B; 2284 B.addAttribute(Attribute::ReadOnly) 2285 .addAttribute(Attribute::ReadNone); 2286 Func->removeAttributes(AttributeSet::FunctionIndex, 2287 AttributeSet::get(Func->getContext(), 2288 AttributeSet::FunctionIndex, 2289 B)); 2290 } 2291 } 2292 IRBuilder<> IRB(&I); 2293 2294 if (MS.WrapIndirectCalls && !CS.getCalledFunction()) 2295 IndirectCallList.push_back(CS); 2296 2297 unsigned ArgOffset = 0; 2298 DEBUG(dbgs() << " CallSite: " << I << "\n"); 2299 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); 2300 ArgIt != End; ++ArgIt) { 2301 Value *A = *ArgIt; 2302 unsigned i = ArgIt - CS.arg_begin(); 2303 if (!A->getType()->isSized()) { 2304 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); 2305 continue; 2306 } 2307 unsigned Size = 0; 2308 Value *Store = nullptr; 2309 // Compute the Shadow for arg even if it is ByVal, because 2310 // in that case getShadow() will copy the actual arg shadow to 2311 // __msan_param_tls. 2312 Value *ArgShadow = getShadow(A); 2313 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset); 2314 DEBUG(dbgs() << " Arg#" << i << ": " << *A << 2315 " Shadow: " << *ArgShadow << "\n"); 2316 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) { 2317 assert(A->getType()->isPointerTy() && 2318 "ByVal argument is not a pointer!"); 2319 Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType()); 2320 unsigned Alignment = CS.getParamAlignment(i + 1); 2321 Store = IRB.CreateMemCpy(ArgShadowBase, 2322 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), 2323 Size, Alignment); 2324 } else { 2325 Size = MS.DL->getTypeAllocSize(A->getType()); 2326 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, 2327 kShadowTLSAlignment); 2328 } 2329 if (MS.TrackOrigins) 2330 IRB.CreateStore(getOrigin(A), 2331 getOriginPtrForArgument(A, IRB, ArgOffset)); 2332 (void)Store; 2333 assert(Size != 0 && Store != nullptr); 2334 DEBUG(dbgs() << " Param:" << *Store << "\n"); 2335 ArgOffset += DataLayout::RoundUpAlignment(Size, 8); 2336 } 2337 DEBUG(dbgs() << " done with call args\n"); 2338 2339 FunctionType *FT = 2340 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0)); 2341 if (FT->isVarArg()) { 2342 VAHelper->visitCallSite(CS, IRB); 2343 } 2344 2345 // Now, get the shadow for the RetVal. 2346 if (!I.getType()->isSized()) return; 2347 IRBuilder<> IRBBefore(&I); 2348 // Until we have full dynamic coverage, make sure the retval shadow is 0. 2349 Value *Base = getShadowPtrForRetval(&I, IRBBefore); 2350 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment); 2351 Instruction *NextInsn = nullptr; 2352 if (CS.isCall()) { 2353 NextInsn = I.getNextNode(); 2354 } else { 2355 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest(); 2356 if (!NormalDest->getSinglePredecessor()) { 2357 // FIXME: this case is tricky, so we are just conservative here. 2358 // Perhaps we need to split the edge between this BB and NormalDest, 2359 // but a naive attempt to use SplitEdge leads to a crash. 
2360 setShadow(&I, getCleanShadow(&I));
2361 setOrigin(&I, getCleanOrigin());
2362 return;
2363 }
2364 NextInsn = NormalDest->getFirstInsertionPt();
2365 assert(NextInsn &&
2366 "Could not find insertion point for retval shadow load");
2367 }
2368 IRBuilder<> IRBAfter(NextInsn);
2369 Value *RetvalShadow =
2370 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2371 kShadowTLSAlignment, "_msret");
2372 setShadow(&I, RetvalShadow);
2373 if (MS.TrackOrigins)
2374 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2375 }
2376
2377 void visitReturnInst(ReturnInst &I) {
2378 IRBuilder<> IRB(&I);
2379 Value *RetVal = I.getReturnValue();
2380 if (!RetVal) return;
2381 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2382 if (CheckReturnValue) {
2383 insertShadowCheck(RetVal, &I);
2384 Value *Shadow = getCleanShadow(RetVal);
2385 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2386 } else {
2387 Value *Shadow = getShadow(RetVal);
2388 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2389 // FIXME: make it conditional if ClStoreCleanOrigin==0
2390 if (MS.TrackOrigins)
2391 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2392 }
2393 }
2394
2395 void visitPHINode(PHINode &I) {
2396 IRBuilder<> IRB(&I);
2397 if (!PropagateShadow) {
2398 setShadow(&I, getCleanShadow(&I));
2399 return;
2400 }
2401
2402 ShadowPHINodes.push_back(&I);
2403 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2404 "_msphi_s"));
2405 if (MS.TrackOrigins)
2406 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2407 "_msphi_o"));
2408 }
2409
2410 void visitAllocaInst(AllocaInst &I) {
2411 setShadow(&I, getCleanShadow(&I));
2412 IRBuilder<> IRB(I.getNextNode());
2413 uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
2414 if (PoisonStack && ClPoisonStackWithCall) {
2415 IRB.CreateCall2(MS.MsanPoisonStackFn,
2416 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2417 ConstantInt::get(MS.IntptrTy, Size));
2418 } else {
2419 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2420 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2421 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
2422 }
2423
2424 if (PoisonStack && MS.TrackOrigins) {
2425 setOrigin(&I, getCleanOrigin());
2426 SmallString<2048> StackDescriptionStorage;
2427 raw_svector_ostream StackDescription(StackDescriptionStorage);
2428 // We create a string with a description of the stack allocation and
2429 // pass it into __msan_set_alloca_origin.
2430 // It will be printed by the run-time if stack-originated UMR is found.
2431 // The first 4 bytes of the string are set to '----' and will be replaced
2432 // by the stack origin id the first time the runtime sees this allocation.
2433 StackDescription << "----" << I.getName() << "@" << F.getName(); 2434 Value *Descr = 2435 createPrivateNonConstGlobalForString(*F.getParent(), 2436 StackDescription.str()); 2437 2438 IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn, 2439 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), 2440 ConstantInt::get(MS.IntptrTy, Size), 2441 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()), 2442 IRB.CreatePointerCast(&F, MS.IntptrTy)); 2443 } 2444 } 2445 2446 void visitSelectInst(SelectInst& I) { 2447 IRBuilder<> IRB(&I); 2448 // a = select b, c, d 2449 Value *B = I.getCondition(); 2450 Value *C = I.getTrueValue(); 2451 Value *D = I.getFalseValue(); 2452 Value *Sb = getShadow(B); 2453 Value *Sc = getShadow(C); 2454 Value *Sd = getShadow(D); 2455 2456 // Result shadow if condition shadow is 0. 2457 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd); 2458 Value *Sa1; 2459 if (I.getType()->isAggregateType()) { 2460 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do 2461 // an extra "select". This results in much more compact IR. 2462 // Sa = select Sb, poisoned, (select b, Sc, Sd) 2463 Sa1 = getPoisonedShadow(getShadowTy(I.getType())); 2464 } else { 2465 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ] 2466 // If Sb (condition is poisoned), look for bits in c and d that are equal 2467 // and both unpoisoned. 2468 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd. 2469 2470 // Cast arguments to shadow-compatible type. 2471 C = CreateAppToShadowCast(IRB, C); 2472 D = CreateAppToShadowCast(IRB, D); 2473 2474 // Result shadow if condition shadow is 1. 2475 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd)); 2476 } 2477 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"); 2478 setShadow(&I, Sa); 2479 if (MS.TrackOrigins) { 2480 // Origins are always i32, so any vector conditions must be flattened. 2481 // FIXME: consider tracking vector origins for app vectors? 2482 if (B->getType()->isVectorTy()) { 2483 Type *FlatTy = getShadowTyNoVec(B->getType()); 2484 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy), 2485 ConstantInt::getNullValue(FlatTy)); 2486 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy), 2487 ConstantInt::getNullValue(FlatTy)); 2488 } 2489 // a = select b, c, d 2490 // Oa = Sb ? Ob : (b ? Oc : Od) 2491 setOrigin(&I, IRB.CreateSelect( 2492 Sb, getOrigin(I.getCondition()), 2493 IRB.CreateSelect(B, getOrigin(C), getOrigin(D)))); 2494 } 2495 } 2496 2497 void visitLandingPadInst(LandingPadInst &I) { 2498 // Do nothing. 
2499 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1 2500 setShadow(&I, getCleanShadow(&I)); 2501 setOrigin(&I, getCleanOrigin()); 2502 } 2503 2504 void visitGetElementPtrInst(GetElementPtrInst &I) { 2505 handleShadowOr(I); 2506 } 2507 2508 void visitExtractValueInst(ExtractValueInst &I) { 2509 IRBuilder<> IRB(&I); 2510 Value *Agg = I.getAggregateOperand(); 2511 DEBUG(dbgs() << "ExtractValue: " << I << "\n"); 2512 Value *AggShadow = getShadow(Agg); 2513 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); 2514 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); 2515 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); 2516 setShadow(&I, ResShadow); 2517 setOriginForNaryOp(I); 2518 } 2519 2520 void visitInsertValueInst(InsertValueInst &I) { 2521 IRBuilder<> IRB(&I); 2522 DEBUG(dbgs() << "InsertValue: " << I << "\n"); 2523 Value *AggShadow = getShadow(I.getAggregateOperand()); 2524 Value *InsShadow = getShadow(I.getInsertedValueOperand()); 2525 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); 2526 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); 2527 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); 2528 DEBUG(dbgs() << " Res: " << *Res << "\n"); 2529 setShadow(&I, Res); 2530 setOriginForNaryOp(I); 2531 } 2532 2533 void dumpInst(Instruction &I) { 2534 if (CallInst *CI = dyn_cast<CallInst>(&I)) { 2535 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n"; 2536 } else { 2537 errs() << "ZZZ " << I.getOpcodeName() << "\n"; 2538 } 2539 errs() << "QQQ " << I << "\n"; 2540 } 2541 2542 void visitResumeInst(ResumeInst &I) { 2543 DEBUG(dbgs() << "Resume: " << I << "\n"); 2544 // Nothing to do here. 2545 } 2546 2547 void visitInstruction(Instruction &I) { 2548 // Everything else: stop propagating and check for poisoned shadow. 2549 if (ClDumpStrictInstructions) 2550 dumpInst(I); 2551 DEBUG(dbgs() << "DEFAULT: " << I << "\n"); 2552 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) 2553 insertShadowCheck(I.getOperand(i), &I); 2554 setShadow(&I, getCleanShadow(&I)); 2555 setOrigin(&I, getCleanOrigin()); 2556 } 2557 }; 2558 2559 /// \brief AMD64-specific implementation of VarArgHelper. 2560 struct VarArgAMD64Helper : public VarArgHelper { 2561 // An unfortunate workaround for asymmetric lowering of va_arg stuff. 2562 // See a comment in visitCallSite for more details. 2563 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7 2564 static const unsigned AMD64FpEndOffset = 176; 2565 2566 Function &F; 2567 MemorySanitizer &MS; 2568 MemorySanitizerVisitor &MSV; 2569 Value *VAArgTLSCopy; 2570 Value *VAArgOverflowSize; 2571 2572 SmallVector<CallInst*, 16> VAStartInstrumentationList; 2573 2574 VarArgAMD64Helper(Function &F, MemorySanitizer &MS, 2575 MemorySanitizerVisitor &MSV) 2576 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr), 2577 VAArgOverflowSize(nullptr) {} 2578 2579 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory }; 2580 2581 ArgKind classifyArgument(Value* arg) { 2582 // A very rough approximation of X86_64 argument classification rules. 2583 Type *T = arg->getType(); 2584 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy()) 2585 return AK_FloatingPoint; 2586 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) 2587 return AK_GeneralPurpose; 2588 if (T->isPointerTy()) 2589 return AK_GeneralPurpose; 2590 return AK_Memory; 2591 } 2592 2593 // For VarArg functions, store the argument shadow in an ABI-specific format 2594 // that corresponds to va_list layout. 
2595 // We do this because Clang lowers va_arg in the frontend, and this pass 2596 // only sees the low level code that deals with va_list internals. 2597 // A much easier alternative (provided that Clang emits va_arg instructions) 2598 // would have been to associate each live instance of va_list with a copy of 2599 // MSanParamTLS, and extract shadow on va_arg() call in the argument list 2600 // order. 2601 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override { 2602 unsigned GpOffset = 0; 2603 unsigned FpOffset = AMD64GpEndOffset; 2604 unsigned OverflowOffset = AMD64FpEndOffset; 2605 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); 2606 ArgIt != End; ++ArgIt) { 2607 Value *A = *ArgIt; 2608 unsigned ArgNo = CS.getArgumentNo(ArgIt); 2609 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal); 2610 if (IsByVal) { 2611 // ByVal arguments always go to the overflow area. 2612 assert(A->getType()->isPointerTy()); 2613 Type *RealTy = A->getType()->getPointerElementType(); 2614 uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy); 2615 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); 2616 OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8); 2617 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), 2618 ArgSize, kShadowTLSAlignment); 2619 } else { 2620 ArgKind AK = classifyArgument(A); 2621 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) 2622 AK = AK_Memory; 2623 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset) 2624 AK = AK_Memory; 2625 Value *Base; 2626 switch (AK) { 2627 case AK_GeneralPurpose: 2628 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset); 2629 GpOffset += 8; 2630 break; 2631 case AK_FloatingPoint: 2632 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset); 2633 FpOffset += 16; 2634 break; 2635 case AK_Memory: 2636 uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType()); 2637 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset); 2638 OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8); 2639 } 2640 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); 2641 } 2642 } 2643 Constant *OverflowSize = 2644 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset); 2645 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS); 2646 } 2647 2648 /// \brief Compute the shadow address for a given va_arg. 2649 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, 2650 int ArgOffset) { 2651 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); 2652 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); 2653 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), 2654 "_msarg"); 2655 } 2656 2657 void visitVAStartInst(VAStartInst &I) override { 2658 IRBuilder<> IRB(&I); 2659 VAStartInstrumentationList.push_back(&I); 2660 Value *VAListTag = I.getArgOperand(0); 2661 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); 2662 2663 // Unpoison the whole __va_list_tag. 2664 // FIXME: magic ABI constants. 2665 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), 2666 /* size */24, /* alignment */8, false); 2667 } 2668 2669 void visitVACopyInst(VACopyInst &I) override { 2670 IRBuilder<> IRB(&I); 2671 Value *VAListTag = I.getArgOperand(0); 2672 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); 2673 2674 // Unpoison the whole __va_list_tag. 2675 // FIXME: magic ABI constants. 
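// (For reference: on x86_64 the __va_list_tag structure is { i32 gp_offset,
// i32 fp_offset, i8* overflow_arg_area, i8* reg_save_area }, i.e. 24 bytes
// with 8-byte alignment; that is where the constants below come from.)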
2676 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), 2677 /* size */24, /* alignment */8, false); 2678 } 2679 2680 void finalizeInstrumentation() override { 2681 assert(!VAArgOverflowSize && !VAArgTLSCopy && 2682 "finalizeInstrumentation called twice"); 2683 if (!VAStartInstrumentationList.empty()) { 2684 // If there is a va_start in this function, make a backup copy of 2685 // va_arg_tls somewhere in the function entry block. 2686 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); 2687 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); 2688 Value *CopySize = 2689 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), 2690 VAArgOverflowSize); 2691 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); 2692 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); 2693 } 2694 2695 // Instrument va_start. 2696 // Copy va_list shadow from the backup copy of the TLS contents. 2697 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) { 2698 CallInst *OrigInst = VAStartInstrumentationList[i]; 2699 IRBuilder<> IRB(OrigInst->getNextNode()); 2700 Value *VAListTag = OrigInst->getArgOperand(0); 2701 2702 Value *RegSaveAreaPtrPtr = 2703 IRB.CreateIntToPtr( 2704 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), 2705 ConstantInt::get(MS.IntptrTy, 16)), 2706 Type::getInt64PtrTy(*MS.C)); 2707 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); 2708 Value *RegSaveAreaShadowPtr = 2709 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB); 2710 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, 2711 AMD64FpEndOffset, 16); 2712 2713 Value *OverflowArgAreaPtrPtr = 2714 IRB.CreateIntToPtr( 2715 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), 2716 ConstantInt::get(MS.IntptrTy, 8)), 2717 Type::getInt64PtrTy(*MS.C)); 2718 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr); 2719 Value *OverflowArgAreaShadowPtr = 2720 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB); 2721 Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset); 2722 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16); 2723 } 2724 } 2725 }; 2726 2727 /// \brief A no-op implementation of VarArgHelper. 2728 struct VarArgNoOpHelper : public VarArgHelper { 2729 VarArgNoOpHelper(Function &F, MemorySanitizer &MS, 2730 MemorySanitizerVisitor &MSV) {} 2731 2732 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {} 2733 2734 void visitVAStartInst(VAStartInst &I) override {} 2735 2736 void visitVACopyInst(VACopyInst &I) override {} 2737 2738 void finalizeInstrumentation() override {} 2739 }; 2740 2741 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, 2742 MemorySanitizerVisitor &Visitor) { 2743 // VarArg handling is only implemented on AMD64. False positives are possible 2744 // on other platforms. 2745 llvm::Triple TargetTriple(Func.getParent()->getTargetTriple()); 2746 if (TargetTriple.getArch() == llvm::Triple::x86_64) 2747 return new VarArgAMD64Helper(Func, Msan, Visitor); 2748 else 2749 return new VarArgNoOpHelper(Func, Msan, Visitor); 2750 } 2751 2752 } // namespace 2753 2754 bool MemorySanitizer::runOnFunction(Function &F) { 2755 MemorySanitizerVisitor Visitor(F, *this); 2756 2757 // Clear out readonly/readnone attributes. 
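// Instrumented code reads and writes the shadow TLS state, so readonly and
// readnone no longer hold for F; dropping them up front keeps later
// optimizations from reasoning as if they still did.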
2758 AttrBuilder B; 2759 B.addAttribute(Attribute::ReadOnly) 2760 .addAttribute(Attribute::ReadNone); 2761 F.removeAttributes(AttributeSet::FunctionIndex, 2762 AttributeSet::get(F.getContext(), 2763 AttributeSet::FunctionIndex, B)); 2764 2765 return Visitor.runOnFunction(); 2766 } 2767