//===-- tsan_interceptors.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_fd.h"

#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif

using namespace __tsan;  // NOLINT

#if SANITIZER_FREEBSD || SANITIZER_MAC
#define __errno_location __error
#define stdout __stdoutp
#define stderr __stderrp
#endif

#if SANITIZER_FREEBSD
#define __libc_realloc __realloc
#define __libc_calloc __calloc
#elif SANITIZER_MAC
#define __libc_malloc REAL(malloc)
#define __libc_realloc REAL(realloc)
#define __libc_calloc REAL(calloc)
#define __libc_free REAL(free)
#elif SANITIZER_ANDROID
#define __errno_location __errno
#define __libc_malloc REAL(malloc)
#define __libc_realloc REAL(realloc)
#define __libc_calloc REAL(calloc)
#define __libc_free REAL(free)
#define mallopt(a, b)
#endif

#if SANITIZER_LINUX || SANITIZER_FREEBSD
#define PTHREAD_CREATE_DETACHED 1
#elif SANITIZER_MAC
#define PTHREAD_CREATE_DETACHED 2
#endif


#ifdef __mips__
const int kSigCount = 129;
#else
const int kSigCount = 65;
#endif

struct my_siginfo_t {
  // The size is determined by looking at sizeof of real siginfo_t on linux.
  u64 opaque[128 / sizeof(u64)];
};

#ifdef __mips__
struct ucontext_t {
  u64 opaque[768 / sizeof(u64) + 1];
};
#else
struct ucontext_t {
  // The size is determined by looking at sizeof of real ucontext_t on linux.
  u64 opaque[936 / sizeof(u64) + 1];
};
#endif

#if defined(__x86_64__) || defined(__mips__) \
  || (defined(__powerpc64__) && defined(__BIG_ENDIAN__))
#define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
#elif defined(__aarch64__) || (defined(__powerpc64__) \
  && defined(__LITTLE_ENDIAN__))
#define PTHREAD_ABI_BASE  "GLIBC_2.17"
#endif

extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
                               __sanitizer_sigset_t *oldset);
// REAL(sigfillset) defined in common interceptors.
DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
extern "C" int fileno_unlocked(void *stream);
#if !SANITIZER_ANDROID
extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
#endif
extern "C" int dirfd(void *dirp);
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
extern "C" int mallopt(int param, int value);
#endif
extern __sanitizer_FILE *stdout, *stderr;
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
const int EINVAL = 22;
const int EBUSY = 16;
const int EOWNERDEAD = 130;
#if !SANITIZER_MAC
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
#if defined(__mips__) || SANITIZER_MAC
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
#if !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;  // NOLINT

// From /usr/include/unistd.h
# define F_ULOCK 0      /* Unlock a previously locked region.  */
# define F_LOCK  1      /* Lock a region for exclusive use.  */
# define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
# define F_TEST  3      /* Test a region for other processes locks.  */
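
// Note: errno is reached through __errno_location() (aliased above to
// __error/__errno on FreeBSD/Mac/Android), presumably because this file
// avoids including libc headers such as <errno.h>.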
#define errno (*__errno_location())

typedef void (*sighandler_t)(int sig);
typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);

#if SANITIZER_ANDROID
struct sigaction_t {
  u32 sa_flags;
  union {
    sighandler_t sa_handler;
    sigactionhandler_t sa_sigaction;
  };
  __sanitizer_sigset_t sa_mask;
  void (*sa_restorer)();
};
#else
struct sigaction_t {
#ifdef __mips__
  u32 sa_flags;
#endif
  union {
    sighandler_t sa_handler;
    sigactionhandler_t sa_sigaction;
  };
#if SANITIZER_FREEBSD
  int sa_flags;
  __sanitizer_sigset_t sa_mask;
#elif SANITIZER_MAC
  __sanitizer_sigset_t sa_mask;
  int sa_flags;
#else
  __sanitizer_sigset_t sa_mask;
#ifndef __mips__
  int sa_flags;
#endif
  void (*sa_restorer)();
#endif
};
#endif

const sighandler_t SIG_DFL = (sighandler_t)0;
const sighandler_t SIG_IGN = (sighandler_t)1;
const sighandler_t SIG_ERR = (sighandler_t)-1;
#if SANITIZER_FREEBSD || SANITIZER_MAC
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
const int SA_SIGINFO = 8;
const int SIG_SETMASK = 3;
#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif

#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
  (!cur_thread()->is_inited)

static sigaction_t sigactions[kSigCount];

namespace __tsan {
struct SignalDesc {
  bool armed;
  bool sigaction;
  my_siginfo_t siginfo;
  ucontext_t ctx;
};

struct ThreadSignalContext {
  int int_signal_send;
  atomic_uintptr_t in_blocking_func;
  atomic_uintptr_t have_pending_signals;
  SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer_sigset_t oldset;
};

// The object is 64-byte aligned, because we want hot data to be located in
// a single cache line if possible (it's accessed in every interceptor).
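// (The LibIgnore object itself is presumably constructed into this
// placeholder with placement new during interceptor initialization;
// sanitizer_placement_new.h is included at the top of the file.)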
static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
static LibIgnore *libignore() {
  return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
}

void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s->type, kSuppressionLib))
      libignore()->AddIgnoredLibrary(s->templ);
  }
  libignore()->OnLibraryLoaded(0);
}

}  // namespace __tsan

static ThreadSignalContext *SigCtx(ThreadState *thr) {
  ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
  if (ctx == 0 && !thr->is_dead) {
    ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
    MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
    thr->signal_ctx = ctx;
  }
  return ctx;
}

#if !SANITIZER_MAC
static unsigned g_thread_finalize_key;
#endif

ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr)
    , pc_(pc)
    , in_ignored_lib_(false) {
  if (!thr_->ignore_interceptors) {
    Initialize(thr);
    FuncEntry(thr, pc);
  }
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
    in_ignored_lib_ = true;
    thr_->in_ignored_lib = true;
    ThreadIgnoreBegin(thr_, pc_);
  }
}

ScopedInterceptor::~ScopedInterceptor() {
  if (in_ignored_lib_) {
    thr_->in_ignored_lib = false;
    ThreadIgnoreEnd(thr_, pc_);
  }
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
    CheckNoLocks(thr_);
  }
}

void ScopedInterceptor::UserCallbackStart() {
  if (in_ignored_lib_) {
    thr_->in_ignored_lib = false;
    ThreadIgnoreEnd(thr_, pc_);
  }
}

void ScopedInterceptor::UserCallbackEnd() {
  if (in_ignored_lib_) {
    thr_->in_ignored_lib = true;
    ThreadIgnoreBegin(thr_, pc_);
  }
}

#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
#endif

#define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
  MemoryAccessRange((thr), (pc), (uptr)(s),                    \
    common_flags()->strict_string_checks ? (len) + 1 : (n), false)

#define READ_STRING(thr, pc, s, n)                             \
    READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))

#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))

struct BlockingCall {
  explicit BlockingCall(ThreadState *thr)
      : thr(thr)
      , ctx(SigCtx(thr)) {
    for (;;) {
      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
      if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
        break;
      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
      ProcessPendingSignals(thr);
    }
    // When we are in a "blocking call", we process signals asynchronously
    // (right when they arrive). In this context we do not expect to be
    // executing any user/runtime code. The known interceptor sequence when
    // this is not true is: pthread_join -> munmap(stack). It's fine
    // to ignore munmap in this case -- we handle stack shadow separately.
    thr->ignore_interceptors++;
  }

  ~BlockingCall() {
    thr->ignore_interceptors--;
    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  }

  ThreadState *thr;
  ThreadSignalContext *ctx;
};

TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
  SCOPED_TSAN_INTERCEPTOR(sleep, sec);
  unsigned res = BLOCK_REAL(sleep)(sec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, usleep, long_t usec) {
  SCOPED_TSAN_INTERCEPTOR(usleep, usec);
  int res = BLOCK_REAL(usleep)(usec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
  SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
  int res = BLOCK_REAL(nanosleep)(req, rem);
  AfterSleep(thr, pc);
  return res;
}

// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();
  void *arg;
};

static void at_exit_wrapper(void *arg) {
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  Acquire(thr, pc, (uptr)arg);
  AtExitCtx *ctx = (AtExitCtx*)arg;
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  __libc_free(ctx);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
      void *arg, void *dso);

#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (cur_thread()->in_symbolizer)
    return 0;
  // We want to setup the atexit callback even if we are in ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
#endif

TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
  if (cur_thread()->in_symbolizer)
    return 0;
  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
      void *arg, void *dso) {
  AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
  ctx->f = f;
  ctx->arg = arg;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
  ThreadIgnoreEnd(thr, pc);
  return res;
}

#if !SANITIZER_MAC
static void on_exit_wrapper(int status, void *arg) {
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  Acquire(thr, pc, (uptr)arg);
  AtExitCtx *ctx = (AtExitCtx*)arg;
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  __libc_free(ctx);
}

TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (cur_thread()->in_symbolizer)
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_wrapper, ctx);
  ThreadIgnoreEnd(thr, pc);
  return res;
}
#endif

// Cleanup old bufs.
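// A buf whose saved stack pointer is at or below the current sp belongs to a
// setjmp frame that has already returned (stacks grow down), so it is stale
// and can be dropped.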
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      uptr sz = thr->jmp_bufs.Size();
      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}

static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
  if (!thr->is_inited)  // called from libc guts during bootstrap
    return;
  // Cleanup old bufs.
  JmpBufGarbageCollect(thr, sp);
  // Remember the buf.
  JmpBuf *buf = thr->jmp_bufs.PushBack();
  buf->sp = sp;
  buf->mangled_sp = mangled_sp;
  buf->shadow_stack_pos = thr->shadow_stack_pos;
  ThreadSignalContext *sctx = SigCtx(thr);
  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
  buf->in_blocking_func = sctx ?
      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
      false;
  buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
      memory_order_relaxed);
}

static void LongJmp(ThreadState *thr, uptr *env) {
#ifdef __powerpc__
  uptr mangled_sp = env[0];
#elif SANITIZER_FREEBSD || SANITIZER_MAC
  uptr mangled_sp = env[2];
#elif defined(SANITIZER_LINUX)
# ifdef __aarch64__
  uptr mangled_sp = env[13];
# else
  uptr mangled_sp = env[6];
# endif
#endif
  // Find the saved buf by mangled_sp.
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->mangled_sp == mangled_sp) {
      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
      // Unwind the stack.
      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
        FuncExit(thr);
      ThreadSignalContext *sctx = SigCtx(thr);
      if (sctx) {
        sctx->int_signal_send = buf->int_signal_send;
        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
            memory_order_relaxed);
      }
      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
          memory_order_relaxed);
      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf("ThreadSanitizer: can't find longjmp buf\n");
  CHECK(0);
}

// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
  SetJmp(cur_thread(), sp, mangled_sp);
}

#if SANITIZER_MAC
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#else  // SANITIZER_MAC
// Not called.  Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor_setjmp(void *env);
extern "C" int __interceptor_setjmp(void *env) {
  CHECK(0);
  return 0;
}

// FIXME: any reason to have a separate declaration?
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor__setjmp(void *env);
extern "C" int __interceptor__setjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor_sigsetjmp(void *env);
extern "C" int __interceptor_sigsetjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" int setjmp(void *env);
extern "C" int _setjmp(void *env);
extern "C" int sigsetjmp(void *env);
extern "C" int __sigsetjmp(void *env);
DEFINE_REAL(int, setjmp, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp, void *env)
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif  // SANITIZER_MAC

TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
  {
    SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(longjmp)(env, val);
}

TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
  {
    SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(siglongjmp)(env, val);
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
  if (cur_thread()->in_symbolizer)
    return __libc_malloc(size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(malloc, size);
    p = user_alloc(thr, pc, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
  SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
  return user_alloc(thr, pc, sz, align);
}

TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
  if (cur_thread()->in_symbolizer)
    return __libc_calloc(size, n);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(calloc, size, n);
    p = user_calloc(thr, pc, size, n);
  }
  invoke_malloc_hook(p, n * size);
  return p;
}

TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
  if (cur_thread()->in_symbolizer)
    return __libc_realloc(p, size);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
    p = user_realloc(thr, pc, p, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void, free, void *p) {
  if (p == 0)
    return;
  if (cur_thread()->in_symbolizer)
    return __libc_free(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(void, cfree, void *p) {
  if (p == 0)
    return;
  if (cur_thread()->in_symbolizer)
    return __libc_free(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(cfree, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
  SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
  return user_alloc_usable_size(p);
}
#endif

TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
  SCOPED_TSAN_INTERCEPTOR(strlen, s);
  uptr len = internal_strlen(s);
  MemoryAccessRange(thr, pc, (uptr)s, len + 1, false);
  return len;
}

TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
  // On FreeBSD we get here from libthr internals on thread initialization.
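  // At that point the current thread state is not initialized yet
  // (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED is true), so the
  // instrumentation is skipped and only the plain internal implementation
  // below runs.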
  if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
    SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
    MemoryAccessRange(thr, pc, (uptr)dst, size, true);
  }
  return internal_memset(dst, v, size);
}

TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
  // On FreeBSD we get here from libthr internals on thread initialization.
  if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
    SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
    MemoryAccessRange(thr, pc, (uptr)dst, size, true);
    MemoryAccessRange(thr, pc, (uptr)src, size, false);
  }
  // On OS X, calling internal_memcpy here will cause memory corruptions,
  // because memcpy and memmove are actually aliases of the same
  // implementation. We need to use internal_memmove here.
  return internal_memmove(dst, src, size);
}

TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
  if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
    SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
    MemoryAccessRange(thr, pc, (uptr)dst, n, true);
    MemoryAccessRange(thr, pc, (uptr)src, n, false);
  }
  return REAL(memmove)(dst, src, n);
}

TSAN_INTERCEPTOR(char*, strchr, char *s, int c) {
  SCOPED_TSAN_INTERCEPTOR(strchr, s, c);
  char *res = REAL(strchr)(s, c);
  uptr len = internal_strlen(s);
  uptr n = res ? (char*)res - (char*)s + 1 : len + 1;
  READ_STRING_OF_LEN(thr, pc, s, len, n);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) {
  SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c);
  char *res = REAL(strchrnul)(s, c);
  uptr len = (char*)res - (char*)s + 1;
  READ_STRING(thr, pc, s, len);
  return res;
}
#endif

TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) {
  SCOPED_TSAN_INTERCEPTOR(strrchr, s, c);
  MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false);
  return REAL(strrchr)(s, c);
}

TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) {  // NOLINT
  SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);  // NOLINT
  uptr srclen = internal_strlen(src);
  MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
  MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
  return REAL(strcpy)(dst, src);  // NOLINT
}

TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
  SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
  uptr srclen = internal_strnlen(src, n);
  MemoryAccessRange(thr, pc, (uptr)dst, n, true);
  MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
  return REAL(strncpy)(dst, src, n);
}

TSAN_INTERCEPTOR(char*, strdup, const char *str) {
  SCOPED_TSAN_INTERCEPTOR(strdup, str);
  // strdup will call malloc, so no instrumentation is required here.
  return REAL(strdup)(str);
}

static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
  if (*addr) {
    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
      if (flags & MAP_FIXED) {
        errno = EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}

TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
                 int fd, OFF_T off) {
  SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
  if (!fix_mmap_addr(&addr, sz, flags))
    return MAP_FAILED;
  void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (fd > 0)
      FdAccess(thr, pc, fd);
    MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
  }
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
                 int fd, OFF64_T off) {
  SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
  if (!fix_mmap_addr(&addr, sz, flags))
    return MAP_FAILED;
  void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (fd > 0)
      FdAccess(thr, pc, fd);
    MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
  }
  return res;
}
#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
#else
#define TSAN_MAYBE_INTERCEPT_MMAP64
#endif

TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
  SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  if (sz != 0) {
    // If sz == 0, munmap will return EINVAL and don't unmap any memory.
    DontNeedShadowFor((uptr)addr, sz);
    ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
  }
  int res = REAL(munmap)(addr, sz);
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_alloc(thr, pc, sz, align);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_alloc(thr, pc, sz, align);
}

TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(valloc, sz);
  return user_alloc(thr, pc, sz, GetPageSizeCached());
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
  sz = RoundUp(sz, GetPageSizeCached());
  return user_alloc(thr, pc, sz, GetPageSizeCached());
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
  *memptr = user_alloc(thr, pc, sz, align);
  return 0;
}
#endif

// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are especially defined as weak functions (so that they don't
// cause link errors when user defines them as well). So they silently
// auto-disable themselves when such symbol is already present in the binary. If
// we link libstdc++ statically, it will bring own __cxa_guard_acquire which
// will silently replace our interceptor.  That's why on Linux we simply export
// these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support statically linking, so we just use a regular
// interceptor.
#if SANITIZER_MAC
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
#endif

// Used in thread-safe function static initialization.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
  for (;;) {
    u32 cmp = atomic_load(g, memory_order_acquire);
    if (cmp == 0) {
      if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
        return 1;
    } else if (cmp == 1) {
      Acquire(thr, pc, (uptr)g);
      return 0;
    } else {
      internal_sched_yield();
    }
  }
}

STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  Release(thr, pc, (uptr)g);
  atomic_store(g, 1, memory_order_release);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  atomic_store(g, 0, memory_order_relaxed);
}

namespace __tsan {
void DestroyThreadState() {
  ThreadState *thr = cur_thread();
  ThreadFinish(thr);
  ThreadSignalContext *sctx = thr->signal_ctx;
  if (sctx) {
    thr->signal_ctx = 0;
    UnmapOrDie(sctx, sizeof(*sctx));
  }
  cur_thread_finalize();
}
}  // namespace __tsan

#if !SANITIZER_MAC
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    return;
  }
  DestroyThreadState();
}
#endif


struct ThreadParam {
  void* (*callback)(void *arg);
  void *param;
  atomic_uintptr_t tid;
};

extern "C" void *__tsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  int tid = 0;
  {
    ThreadState *thr = cur_thread();
    // Thread-local state is not initialized yet.
    ScopedIgnoreInterceptors ignore;
#if !SANITIZER_MAC
    ThreadIgnoreBegin(thr, 0);
    if (pthread_setspecific(g_thread_finalize_key,
                            (void *)GetPthreadDestructorIterations())) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    ThreadIgnoreEnd(thr, 0);
#endif
    while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
      internal_sched_yield();
    ThreadStart(thr, tid, GetTid());
    atomic_store(&p->tid, 0, memory_order_release);
  }
  void *res = callback(param);
  // Prevent the callback from being tail called,
  // it mixes up stack traces.
  volatile int foo = 42;
  foo++;
  return res;
}

TSAN_INTERCEPTOR(int, pthread_create,
    void *th, void *attr, void *(*callback)(void*), void * param) {
  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
  if (ctx->after_multithreaded_fork) {
    if (flags()->die_after_fork) {
      Report("ThreadSanitizer: starting new threads after multi-threaded "
          "fork is not supported. Dying (set die_after_fork=0 to override)\n");
      Die();
    } else {
      VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
          "fork is not supported (pid %d). Continuing because of "
          "die_after_fork=0, but you are on your own\n", internal_getpid());
    }
  }
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(&myattr);
    attr = &myattr;
  }
  int detached = 0;
  REAL(pthread_attr_getdetachstate)(attr, &detached);
  AdjustStackSize(attr);

  ThreadParam p;
  p.callback = callback;
  p.param = param;
  atomic_store(&p.tid, 0, memory_order_relaxed);
  int res = -1;
  {
    // Otherwise we see false positives in pthread stack manipulation.
    ScopedIgnoreInterceptors ignore;
    ThreadIgnoreBegin(thr, pc);
    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
    ThreadIgnoreEnd(thr, pc);
  }
  if (res == 0) {
    int tid = ThreadCreate(thr, pc, *(uptr*)th,
                           detached == PTHREAD_CREATE_DETACHED);
    CHECK_NE(tid, 0);
    // Synchronization on p.tid serves two purposes:
    // 1. ThreadCreate must finish before the new thread starts.
    //    Otherwise the new thread can call pthread_detach, but the pthread_t
    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
    // 2. ThreadStart must finish before this thread continues.
    //    Otherwise, this thread can call pthread_detach and reset thr->sync
    //    before the new thread got a chance to acquire from it in ThreadStart.
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)
      internal_sched_yield();
  }
  if (attr == &myattr)
    pthread_attr_destroy(&myattr);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
  int tid = ThreadTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr, pc);
  if (res == 0) {
    ThreadJoin(thr, pc, tid);
  }
  return res;
}

DEFINE_REAL_PTHREAD_FUNCTIONS

TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
  SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
  int tid = ThreadTid(thr, pc, (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
  }
  return res;
}

// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has different size in the different versions.
// If we call the new REAL functions for an old pthread_cond_t, they will
// corrupt memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions for a new pthread_cond_t, we will lose
// some functionality (e.g. the old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require 2 versions of the interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. Always call new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // first word of pthread_cond_t to zero.
  // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(p, memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
  internal_memset(newcond, 0, pthread_cond_t_sz);
  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
      memory_order_acq_rel))
    return newcond;
  WRAP(free)(newcond);
  return (void*)cond;
}

struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
};

static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
  // pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
  // since the thread is cancelled, so we have to manually execute them
  // (the thread still can run some user code due to pthread_cleanup_push).
  ThreadSignalContext *ctx = SigCtx(arg->thr);
  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  MutexLock(arg->thr, arg->pc, (uptr)arg->m);
  // Undo BlockingCall ctor effects.
  arg->thr->ignore_interceptors--;
  arg->si->~ScopedInterceptor();
}

INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  return REAL(pthread_cond_init)(cond, a);
}

INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
  CondMutexUnlockCtx arg = {&si, thr, pc, m};
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cc.
  {
    // Enable signal delivery while the thread is blocked.
    BlockingCall bc(thr);
    res = call_pthread_cancel_with_cleanup(
        (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
        cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
  }
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  MutexLock(thr, pc, (uptr)m);
  return res;
}

INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
  CondMutexUnlockCtx arg = {&si, thr, pc, m};
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cc.
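  // (Same cancellation-aware cleanup scheme as in pthread_cond_wait above.)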
  {
    BlockingCall bc(thr);
    res = call_pthread_cancel_with_cleanup(
        REAL(pthread_cond_timedwait), cond, m, abstime,
        (void(*)(void *arg))cond_mutex_unlock, &arg);
  }
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  MutexLock(thr, pc, (uptr)m);
  return res;
}

INTERCEPTOR(int, pthread_cond_signal, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_signal)(cond);
}

INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_broadcast)(cond);
}

INTERCEPTOR(int, pthread_cond_destroy, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  int res = REAL(pthread_cond_destroy)(cond);
  if (common_flags()->legacy_pthread_cond) {
    // Free our aux cond and zero the pointer to not leave dangling pointers.
    WRAP(free)(cond);
    atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    bool recursive = false;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        recursive = (type == PTHREAD_MUTEX_RECURSIVE
            || type == PTHREAD_MUTEX_RECURSIVE_NP);
    }
    MutexCreate(thr, pc, (uptr)m, false, recursive, false);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
  int res = REAL(pthread_mutex_destroy)(m);
  if (res == 0 || res == EBUSY) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
  int res = REAL(pthread_mutex_trylock)(m);
  if (res == EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == EOWNERDEAD)
    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m, false, false, false);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
  int res = REAL(pthread_spin_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  int res = REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
  MutexUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_unlock)(m);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m, true, false, false);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
  int res = REAL(pthread_rwlock_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexReadLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexReadLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  int res = REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
  MutexReadOrWriteUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_unlock)(m);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_init)(b, a, count);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_destroy)(b);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
  Release(thr, pc, (uptr)b);
  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_wait)(b);
  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
    Acquire(thr, pc, (uptr)b);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
  if (o == 0 || f == 0)
    return EINVAL;
  atomic_uint32_t *a;
  if (!SANITIZER_MAC)
    a = static_cast<atomic_uint32_t*>(o);
  else  // On OS X, pthread_once_t has a header with a long-sized signature.
    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
  u32 v = atomic_load(a, memory_order_acquire);
  if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
                                               memory_order_relaxed)) {
    (*f)();
    if (!thr->in_ignored_lib)
      Release(thr, pc, (uptr)o);
    atomic_store(a, 2, memory_order_release);
  } else {
    while (v != 2) {
      internal_sched_yield();
      v = atomic_load(a, memory_order_acquire);
    }
    if (!thr->in_ignored_lib)
      Acquire(thr, pc, (uptr)o);
  }
  return 0;
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__xstat)(version, path, buf);
}
#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
#else
#define TSAN_MAYBE_INTERCEPT___XSTAT
#endif

TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
  SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(stat)(path, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__xstat)(0, path, buf);
#endif
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__xstat64)(version, path, buf);
}
#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
#else
#define TSAN_MAYBE_INTERCEPT___XSTAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__xstat64)(0, path, buf);
}
#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
#else
#define TSAN_MAYBE_INTERCEPT_STAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__lxstat)(version, path, buf);
}
#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
#else
#define TSAN_MAYBE_INTERCEPT___LXSTAT
#endif

TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
  SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(lstat)(path, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__lxstat)(0, path, buf);
#endif
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__lxstat64)(version, path, buf);
}
#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___LXSTAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
  READ_STRING(thr, pc, path, 0);
  return REAL(__lxstat64)(0, path, buf);
}
#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
#else
#define TSAN_MAYBE_INTERCEPT_LSTAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif

TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat)(fd, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(0, fd, buf);
#endif
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(0, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif

TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
  SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open)(name, flags, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
  SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open64)(name, flags, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
#else
#define TSAN_MAYBE_INTERCEPT_OPEN64
#endif

TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat64)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
#else
#define TSAN_MAYBE_INTERCEPT_CREAT64
#endif

TSAN_INTERCEPTOR(int, dup, int oldfd) {
  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
  int newfd = REAL(dup)(oldfd);
  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
    FdDup(thr, pc, oldfd, newfd, true);
  return newfd;
}

TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
  int fd = REAL(eventfd)(initval, flags);
  if (fd >= 0)
    FdEventCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
#else
#define TSAN_MAYBE_INTERCEPT_EVENTFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
  SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  fd = REAL(signalfd)(fd, mask, flags);
  if (fd >= 0)
    FdSignalCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
#else
#define TSAN_MAYBE_INTERCEPT_SIGNALFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
  int fd = REAL(inotify_init)(fake);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
  int fd = REAL(inotify_init1)(flags);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
#endif

TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
  SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
  int fd = REAL(socket)(domain, type, protocol);
  if (fd >= 0)
    FdSocketCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
  SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
  int res = REAL(socketpair)(domain, type, protocol, fd);
  if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
    FdPipeCreate(thr, pc, fd[0], fd[1]);
  return res;
}

TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
  FdSocketConnecting(thr, pc, fd);
  int res = REAL(connect)(fd, addr, addrlen);
  if (res == 0 && fd >= 0)
    FdSocketConnect(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
  int res = REAL(bind)(fd, addr, addrlen);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
  SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
  int res = REAL(listen)(fd, backlog);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_create, int size) {
  SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
  int fd = REAL(epoll_create)(size);
  if (fd >= 0)
    FdPollCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
  int fd = REAL(epoll_create1)(flags);
  if (fd >= 0)
    FdPollCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
#endif

TSAN_INTERCEPTOR(int, close, int fd) {
  SCOPED_TSAN_INTERCEPTOR(close, fd);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  return REAL(close)(fd);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
  SCOPED_TSAN_INTERCEPTOR(__close, fd);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
#else
#define TSAN_MAYBE_INTERCEPT___CLOSE
#endif

// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
  SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
  int fds[64];
  int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++) {
    if (fds[i] > 0)
      FdClose(thr, pc, fds[i]);
  }
  REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
#else
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
#endif

TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
  int res = REAL(pipe)(pipefd);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
  int res = REAL(pipe2)(pipefd, flags);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
  int res = REAL(pipe)(pipefd);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
  int res = REAL(pipe2)(pipefd, flags);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}
#endif

TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
  SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
  if (fd >= 0) {
    FdAccess(thr, pc, fd);
    FdRelease(thr, pc, fd);
  }
  int res = REAL(send)(fd, buf, len, flags);
  return res;
}

TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
  SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags);
  if (fd >= 0) {
    FdAccess(thr, pc, fd);
    FdRelease(thr, pc, fd);
  }
  int res = REAL(sendmsg)(fd, msg, flags);
  return res;
}

TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
  SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags);
  if (fd >= 0)
    FdAccess(thr, pc, fd);
  int res = REAL(recv)(fd, buf, len, flags);
  if (res >= 0 && fd >= 0) {
    FdAcquire(thr, pc, fd);
  }
  return res;
}

TSAN_INTERCEPTOR(int, unlink, char *path) {
  SCOPED_TSAN_INTERCEPTOR(unlink, path);
  Release(thr, pc, File2addr(path));
  int res = REAL(unlink)(path);
  return res;
}

TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
  void *res = REAL(tmpfile)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
  void *res = REAL(tmpfile64)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}
#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
#else
#define TSAN_MAYBE_INTERCEPT_TMPFILE64
#endif

TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
  // libc file streams can call user-supplied functions, see fopencookie.
  {
    SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
    MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
  }
  return REAL(fread)(ptr, size, nmemb, f);
}

TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
  // libc file streams can call user-supplied functions, see fopencookie.
  {
    SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
    MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
  }
  return REAL(fwrite)(p, size, nmemb, f);
}

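// A sketch of the kind of bug the range accesses in fread/fwrite above are
// meant to catch (hypothetical user code, not part of this file):
//
//   char buf[128];
//   // thread 1:
//   fread(buf, 1, sizeof(buf), f);  // modeled as a write to buf[0..127]
//   // thread 2, concurrently:
//   buf[0] = 1;                     // races with the fread above
//
// MemoryAccessRange() marks the whole buffer as written (fread) or read
// (fwrite), so the race is reported even though the copy happens inside libc.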
static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time.
  REAL(fflush)(stdout);
  REAL(fflush)(stderr);
}

TSAN_INTERCEPTOR(void, abort, int fake) {
  SCOPED_TSAN_INTERCEPTOR(abort, fake);
  FlushStreams();
  REAL(abort)(fake);
}

TSAN_INTERCEPTOR(int, puts, const char *s) {
  SCOPED_TSAN_INTERCEPTOR(puts, s);
  MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
  return REAL(puts)(s);
}

TSAN_INTERCEPTOR(int, rmdir, char *path) {
  SCOPED_TSAN_INTERCEPTOR(rmdir, path);
  Release(thr, pc, Dir2addr(path));
  int res = REAL(rmdir)(path);
  return res;
}

TSAN_INTERCEPTOR(int, closedir, void *dirp) {
  SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
  int fd = dirfd(dirp);
  FdClose(thr, pc, fd);
  return REAL(closedir)(dirp);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
  SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  if (epfd >= 0 && fd >= 0)
    FdAccess(thr, pc, fd);
  if (op == EPOLL_CTL_ADD && epfd >= 0)
    FdRelease(thr, pc, epfd);
  int res = REAL(epoll_ctl)(epfd, op, fd, ev);
  return res;
}
#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
  SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
  if (res > 0 && epfd >= 0)
    FdAcquire(thr, pc, epfd);
  return res;
}
#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
#endif

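// CallUserSignalHandler() below checks for errno spoiling with a sentinel:
// errno is set to 99 before invoking the user handler, and if it is no longer
// 99 when the handler returns (for an asynchronous signal other than SIGTERM,
// and only if report_bugs is set), a ReportTypeErrnoInSignal report is
// produced. The caller's errno is restored afterwards in either case.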
namespace __tsan {

static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
    bool sigact, int sig, my_siginfo_t *info, void *uctx) {
  if (acquire)
    Acquire(thr, 0, (uptr)&sigactions[sig]);
  // Ensure that the handler does not spoil errno.
  const int saved_errno = errno;
  errno = 99;
  // This code races with sigaction. Be careful to not read sa_sigaction twice.
  // Also need to remember pc for reporting before the call,
  // because the handler can reset it.
  volatile uptr pc = sigact ?
     (uptr)sigactions[sig].sa_sigaction :
     (uptr)sigactions[sig].sa_handler;
  if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
    if (sigact)
      ((sigactionhandler_t)pc)(sig, info, uctx);
    else
      ((sighandler_t)pc)(sig);
  }
  // We do not detect errno spoiling for SIGTERM,
  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
  // and tsan would report a false positive in such a case.
  // It's difficult to properly detect this situation (reraise),
  // because in the async signal processing case (when the handler is called
  // directly from rtl_generic_sighandler) we have not yet received the
  // reraised signal; and it looks too fragile to intercept all ways to
  // reraise a signal.
  if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
    VarSizeStackTrace stack;
    // StackTrace::GetNextInstructionPc(pc) is used because a return address
    // is expected; OutputReport() will undo this.
    ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeErrnoInSignal);
    if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
      rep.AddStack(stack, true);
      OutputReport(thr, rep);
    }
  }
  errno = saved_errno;
}

void ProcessPendingSignals(ThreadState *thr) {
  ThreadSignalContext *sctx = SigCtx(thr);
  if (sctx == 0 ||
      atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
    return;
  atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
  atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
  CHECK_EQ(0, REAL(sigfillset)(&sctx->emptyset));
  CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->emptyset, &sctx->oldset));
  for (int sig = 0; sig < kSigCount; sig++) {
    SignalDesc *signal = &sctx->pending_signals[sig];
    if (signal->armed) {
      signal->armed = false;
      CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
          &signal->siginfo, &signal->ctx);
    }
  }
  CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->oldset, 0));
  atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}

}  // namespace __tsan

static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
  return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
      sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
      // If we are sending a signal to ourselves, we must process it now.
      (sctx && sig == sctx->int_signal_send);
}

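// rtl_generic_sighandler() is the actual handler installed by the sigaction()
// interceptor below (via rtl_sighandler/rtl_sigaction). Synchronous signals
// and signals arriving inside blocking functions are delivered to the user
// handler immediately; everything else is recorded in pending_signals and
// replayed later by ProcessPendingSignals().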
void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
    my_siginfo_t *info, void *ctx) {
  ThreadState *thr = cur_thread();
  ThreadSignalContext *sctx = SigCtx(thr);
  if (sig < 0 || sig >= kSigCount) {
    VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
    return;
  }
  // Don't mess with synchronous signals.
  const bool sync = is_sync_signal(sctx, sig);
  if (sync ||
      // If we are in a blocking function, we can safely process it now
      // (but check whether we are in a recursive interceptor,
      // i.e. pthread_join()->munmap()).
      (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
    atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
    if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
      // We ignore interceptors in blocking functions,
      // temporarily enable them again while we are calling the user function.
      int const i = thr->ignore_interceptors;
      thr->ignore_interceptors = 0;
      atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
      CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
      thr->ignore_interceptors = i;
      atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
    } else {
      // Be very conservative about when we do acquire in this case.
      // It's unsafe to do acquire in async handlers, because ThreadState
      // can be in an inconsistent state.
      // SIGSYS looks relatively safe -- it's synchronous and can actually
      // need some global state.
      bool acq = (sig == SIGSYS);
      CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
    }
    atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
    return;
  }

  if (sctx == 0)
    return;
  SignalDesc *signal = &sctx->pending_signals[sig];
  if (signal->armed == false) {
    signal->armed = true;
    signal->sigaction = sigact;
    if (info)
      internal_memcpy(&signal->siginfo, info, sizeof(*info));
    if (ctx)
      internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
    atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
  }
}

static void rtl_sighandler(int sig) {
  rtl_generic_sighandler(false, sig, 0, 0);
}

static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
  rtl_generic_sighandler(true, sig, info, ctx);
}

TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
  SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old);
  if (old)
    internal_memcpy(old, &sigactions[sig], sizeof(*old));
  if (act == 0)
    return 0;
  // Copy act into sigactions[sig].
  // Can't use struct copy, because the compiler can emit a call to memcpy.
  // Can't use internal_memcpy, because it copies byte-by-byte,
  // and the signal handler reads sa_handler concurrently. So it can read
  // some bytes from the old value and some bytes from the new value.
  // Use volatile to prevent insertion of memcpy.
  sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
  sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
  internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
      sizeof(sigactions[sig].sa_mask));
#if !SANITIZER_FREEBSD && !SANITIZER_MAC
  sigactions[sig].sa_restorer = act->sa_restorer;
#endif
  sigaction_t newact;
  internal_memcpy(&newact, act, sizeof(newact));
  REAL(sigfillset)(&newact.sa_mask);
  if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
    if (newact.sa_flags & SA_SIGINFO)
      newact.sa_sigaction = rtl_sigaction;
    else
      newact.sa_handler = rtl_sighandler;
  }
  ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
  int res = REAL(sigaction)(sig, &newact, 0);
  return res;
}

TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
  sigaction_t act;
  act.sa_handler = h;
  REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask));
  act.sa_flags = 0;
  sigaction_t old;
  int res = sigaction(sig, &act, &old);
  if (res)
    return SIG_ERR;
  return old.sa_handler;
}

TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
  SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
  return REAL(sigsuspend)(mask);
}

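// raise()/kill()/pthread_kill() targeting the current thread or process record
// the signal number in int_signal_send for the duration of the call, so that
// is_sync_signal() treats the self-sent signal as synchronous and the handler
// runs right away instead of being deferred.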
TSAN_INTERCEPTOR(int, raise, int sig) {
  SCOPED_TSAN_INTERCEPTOR(raise, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  sctx->int_signal_send = sig;
  int res = REAL(raise)(sig);
  CHECK_EQ(sctx->int_signal_send, sig);
  sctx->int_signal_send = prev;
  return res;
}

TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
  SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  if (pid == (int)internal_getpid()) {
    sctx->int_signal_send = sig;
  }
  int res = REAL(kill)(pid, sig);
  if (pid == (int)internal_getpid()) {
    CHECK_EQ(sctx->int_signal_send, sig);
    sctx->int_signal_send = prev;
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
  SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  if (tid == pthread_self()) {
    sctx->int_signal_send = sig;
  }
  int res = REAL(pthread_kill)(tid, sig);
  if (tid == pthread_self()) {
    CHECK_EQ(sctx->int_signal_send, sig);
    sctx->int_signal_send = prev;
  }
  return res;
}

TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
  SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
  // It's intercepted merely to process pending signals.
  return REAL(gettimeofday)(tv, tz);
}

TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
    void *hints, void *rv) {
  SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
  // We miss atomic synchronization in getaddrinfo,
  // and can report a false race between malloc and free
  // inside of getaddrinfo. So ignore memory accesses.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(getaddrinfo)(node, service, hints, rv);
  ThreadIgnoreEnd(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, fork, int fake) {
  if (cur_thread()->in_symbolizer)
    return REAL(fork)(fake);
  SCOPED_INTERCEPTOR_RAW(fork, fake);
  ForkBefore(thr, pc);
  int pid = REAL(fork)(fake);
  if (pid == 0) {
    // child
    ForkChildAfter(thr, pc);
    FdOnFork(thr, pc);
  } else if (pid > 0) {
    // parent
    ForkParentAfter(thr, pc);
  } else {
    // error
    ForkParentAfter(thr, pc);
  }
  return pid;
}

TSAN_INTERCEPTOR(int, vfork, int fake) {
  // Some programs (e.g. openjdk) call close for all file descriptors
  // in the child process. Under tsan the address space is shared, so the
  // parent process also thinks that the descriptors are closed (while they
  // actually are not). This leads to false positives due to missed
  // synchronization.
  // Strictly speaking, this is undefined behavior, because the vfork child is
  // not allowed to call any functions other than exec/exit. But this is what
  // openjdk does, so we want to handle it.
  // We could disable interceptors in the child process. But it's not possible
  // to simply intercept and wrap vfork, because the vfork child is not allowed
  // to return from the function that calls vfork, and that's exactly what
  // we would do. So this would require some assembly trickery as well.
  // Instead we simply turn vfork into fork.
  return WRAP(fork)(fake);
}

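// dl_iterate_phdr() is wrapped below so that the dynamic-linker-owned
// dlpi_name string can be reset in shadow memory before and after the user
// callback runs; see the comment in dl_iterate_phdr_cb().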
#if !SANITIZER_MAC && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
    void *data);
struct dl_iterate_phdr_data {
  ThreadState *thr;
  uptr pc;
  dl_iterate_phdr_cb_t cb;
  void *data;
};

static bool IsAppNotRodata(uptr addr) {
  return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
}

static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
    void *data) {
  dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
  // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
  // later accessible in the dl_iterate_phdr callback. But we don't see
  // synchronization inside of the dynamic linker, so we "unpoison" it here
  // in order to not produce false reports. Ignoring malloc/free in
  // dlopen/dlclose is not enough because some libc functions call
  // __libc_dlopen.
  if (info && IsAppNotRodata((uptr)info->dlpi_name))
    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
        internal_strlen(info->dlpi_name));
  int res = cbdata->cb(info, size, cbdata->data);
  // Perform the check one more time in case info->dlpi_name was overwritten
  // by the user callback.
  if (info && IsAppNotRodata((uptr)info->dlpi_name))
    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
        internal_strlen(info->dlpi_name));
  return res;
}

TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
  SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
  dl_iterate_phdr_data cbdata;
  cbdata.thr = thr;
  cbdata.pc = pc;
  cbdata.cb = cb;
  cbdata.data = data;
  int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
  return res;
}
#endif

static int OnExit(ThreadState *thr) {
  int status = Finalize(thr);
  FlushStreams();
  return status;
}

struct TsanInterceptorContext {
  ThreadState *thr;
  const uptr caller_pc;
  const uptr pc;
};

#if !SANITIZER_MAC
static void HandleRecvmsg(ThreadState *thr, uptr pc,
    __sanitizer_msghdr *msg) {
  int fds[64];
  int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++)
    FdEventCreate(thr, pc, fds[i]);
}
#endif

#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
// These interceptors do not seem to be strictly necessary for tsan.
// But we see cases where the interceptors consume 70% of execution time.
// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
// function "writes to" the buffer. Then, the same memory is "written to"
// twice, first as buf and then as pwbufp (both of them refer to the same
// addresses).
#undef SANITIZER_INTERCEPT_GETPWENT
#undef SANITIZER_INTERCEPT_GETPWENT_R
#undef SANITIZER_INTERCEPT_FGETPWENT
#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
// __tls_get_addr can be called with a mis-aligned stack due to:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// There are two potential issues:
// 1. Sanitizer code contains a MOVDQA spill (this does not seem to be the
//    case right now), or
// 2. ProcessPendingSignals calls a user handler which contains a MOVDQA spill
//    (this happens right now).
// Since the interceptor only initializes memory for msan, the simplest
// solution is to disable the interceptor in tsan (other sanitizers do not
// call signal handlers from COMMON_INTERCEPTOR_ENTER).
#undef SANITIZER_INTERCEPT_TLS_GET_ADDR

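// The COMMON_INTERCEPTOR_* macros below plug tsan into the shared interceptors
// in sanitizer_common_interceptors.inc: ctx carries a TsanInterceptorContext
// (thr/caller_pc/pc), memory accesses are reported via MemoryAccessRange(),
// and synchronization is expressed through Acquire/Release and the Fd* hooks.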
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)

#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
                    ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
                    true)

#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
                    ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
                    false)

#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
  TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
  ctx = (void *)&_ctx; \
  (void) ctx;

#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
  SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
  TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
  ctx = (void *)&_ctx; \
  (void) ctx;

#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
  Acquire(thr, pc, File2addr(path)); \
  if (file) { \
    int fd = fileno_unlocked(file); \
    if (fd >= 0) FdFileCreate(thr, pc, fd); \
  }

#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
  if (file) { \
    int fd = fileno_unlocked(file); \
    if (fd >= 0) FdClose(thr, pc, fd); \
  }

#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
  libignore()->OnLibraryLoaded(filename)

#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
  libignore()->OnLibraryUnloaded()

#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)

#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
  Release(((TsanInterceptorContext *) ctx)->thr, pc, u)

#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))

#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
  FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
  FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
  FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
  FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)

#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
  ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)

#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
  __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)

#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)

#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
  OnExit(((TsanInterceptorContext *) ctx)->thr)

#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
  MutexLock(((TsanInterceptorContext *)ctx)->thr, \
            ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
  MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
              ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
  MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
              ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
  HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
                ((TsanInterceptorContext *)ctx)->pc, msg)
#endif

#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
  if (TsanThread *t = GetCurrentThread()) { \
    *begin = t->tls_begin(); \
    *end = t->tls_end(); \
  } else { \
    *begin = *end = 0; \
  }

#include "sanitizer_common/sanitizer_common_interceptors.inc"

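// TSAN_SYSCALL() below expands (roughly) to:
//   ThreadState *thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return;
//   ScopedSyscall scoped_syscall(thr);
// so every syscall_* hook gets a thr, an early-out while interceptors are
// ignored, and pending-signal processing when the scope is left.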
#define TSAN_SYSCALL() \
  ThreadState *thr = cur_thread(); \
  if (thr->ignore_interceptors) \
    return; \
  ScopedSyscall scoped_syscall(thr) \
/**/

struct ScopedSyscall {
  ThreadState *thr;

  explicit ScopedSyscall(ThreadState *thr)
      : thr(thr) {
    Initialize(thr);
  }

  ~ScopedSyscall() {
    ProcessPendingSignals(thr);
  }
};

#if !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
  TSAN_SYSCALL();
  MemoryAccessRange(thr, pc, p, s, write);
}

static void syscall_acquire(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  Acquire(thr, pc, addr);
  DPrintf("syscall_acquire(%p)\n", addr);
}

static void syscall_release(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  DPrintf("syscall_release(%p)\n", addr);
  Release(thr, pc, addr);
}

static void syscall_fd_close(uptr pc, int fd) {
  TSAN_SYSCALL();
  FdClose(thr, pc, fd);
}

static USED void syscall_fd_acquire(uptr pc, int fd) {
  TSAN_SYSCALL();
  FdAcquire(thr, pc, fd);
  DPrintf("syscall_fd_acquire(%p)\n", fd);
}

static USED void syscall_fd_release(uptr pc, int fd) {
  TSAN_SYSCALL();
  DPrintf("syscall_fd_release(%p)\n", fd);
  FdRelease(thr, pc, fd);
}

static void syscall_pre_fork(uptr pc) {
  TSAN_SYSCALL();
  ForkBefore(thr, pc);
}

static void syscall_post_fork(uptr pc, int pid) {
  TSAN_SYSCALL();
  if (pid == 0) {
    // child
    ForkChildAfter(thr, pc);
    FdOnFork(thr, pc);
  } else if (pid > 0) {
    // parent
    ForkParentAfter(thr, pc);
  } else {
    // error
    ForkParentAfter(thr, pc);
  }
}
#endif

#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)

#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)

#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do { \
    (void)(p); \
    (void)(s); \
  } while (false)

#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  do { \
    (void)(p); \
    (void)(s); \
  } while (false)

#define COMMON_SYSCALL_ACQUIRE(addr) \
  syscall_acquire(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_RELEASE(addr) \
  syscall_release(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_PRE_FORK() \
  syscall_pre_fork(GET_CALLER_PC())

#define COMMON_SYSCALL_POST_FORK(res) \
  syscall_post_fork(GET_CALLER_PC(), res)

#include "sanitizer_common/sanitizer_common_syscalls.inc"

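// finalize() is registered with __cxa_atexit() in InitializeInterceptors()
// below: it runs Finalize(), flushes stdio so pending output is not lost, and
// calls Die() if Finalize() returns a non-zero status.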
namespace __tsan {

static void finalize(void *arg) {
  ThreadState *thr = cur_thread();
  int status = Finalize(thr);
  // Make sure the output is not lost.
  FlushStreams();
  if (status)
    Die();
}

#if !SANITIZER_MAC && !SANITIZER_ANDROID
static void unreachable() {
  Report("FATAL: ThreadSanitizer: unreachable called\n");
  Die();
}
#endif

void InitializeInterceptors() {
#if !SANITIZER_MAC
  // We need to set these up early, because functions like dlsym() can call
  // them.
  REAL(memset) = internal_memset;
  REAL(memcpy) = internal_memcpy;
#endif

  // Instruct libc malloc to consume less memory.
#if SANITIZER_LINUX
  mallopt(1, 0);  // M_MXFAST
  mallopt(-3, 32*1024);  // M_MMAP_THRESHOLD
#endif

  InitializeCommonInterceptors();

#if !SANITIZER_MAC
  // We cannot use TSAN_INTERCEPT to get the setjmp address,
  // because it does &setjmp and setjmp is not present in some versions of libc.
  using __interception::GetRealFunctionAddress;
  GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
  GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
  GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
  GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
#endif

  TSAN_INTERCEPT(longjmp);
  TSAN_INTERCEPT(siglongjmp);

  TSAN_INTERCEPT(malloc);
  TSAN_INTERCEPT(__libc_memalign);
  TSAN_INTERCEPT(calloc);
  TSAN_INTERCEPT(realloc);
  TSAN_INTERCEPT(free);
  TSAN_INTERCEPT(cfree);
  TSAN_INTERCEPT(mmap);
  TSAN_MAYBE_INTERCEPT_MMAP64;
  TSAN_INTERCEPT(munmap);
  TSAN_MAYBE_INTERCEPT_MEMALIGN;
  TSAN_INTERCEPT(valloc);
  TSAN_MAYBE_INTERCEPT_PVALLOC;
  TSAN_INTERCEPT(posix_memalign);

  TSAN_INTERCEPT(strlen);
  TSAN_INTERCEPT(memset);
  TSAN_INTERCEPT(memcpy);
  TSAN_INTERCEPT(memmove);
  TSAN_INTERCEPT(strchr);
  TSAN_INTERCEPT(strchrnul);
  TSAN_INTERCEPT(strrchr);
  TSAN_INTERCEPT(strcpy);  // NOLINT
  TSAN_INTERCEPT(strncpy);
  TSAN_INTERCEPT(strdup);

  TSAN_INTERCEPT(pthread_create);
  TSAN_INTERCEPT(pthread_join);
  TSAN_INTERCEPT(pthread_detach);

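  // pthread_cond_* is bound to a specific symbol version (PTHREAD_ABI_BASE,
  // defined near the top of this file), presumably so the interceptors attach
  // to the current glibc condvar symbols rather than an older compat version.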
  TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);

  TSAN_INTERCEPT(pthread_mutex_init);
  TSAN_INTERCEPT(pthread_mutex_destroy);
  TSAN_INTERCEPT(pthread_mutex_trylock);
  TSAN_INTERCEPT(pthread_mutex_timedlock);

  TSAN_INTERCEPT(pthread_spin_init);
  TSAN_INTERCEPT(pthread_spin_destroy);
  TSAN_INTERCEPT(pthread_spin_lock);
  TSAN_INTERCEPT(pthread_spin_trylock);
  TSAN_INTERCEPT(pthread_spin_unlock);

  TSAN_INTERCEPT(pthread_rwlock_init);
  TSAN_INTERCEPT(pthread_rwlock_destroy);
  TSAN_INTERCEPT(pthread_rwlock_rdlock);
  TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
  TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
  TSAN_INTERCEPT(pthread_rwlock_wrlock);
  TSAN_INTERCEPT(pthread_rwlock_trywrlock);
  TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
  TSAN_INTERCEPT(pthread_rwlock_unlock);

  TSAN_INTERCEPT(pthread_barrier_init);
  TSAN_INTERCEPT(pthread_barrier_destroy);
  TSAN_INTERCEPT(pthread_barrier_wait);

  TSAN_INTERCEPT(pthread_once);

  TSAN_INTERCEPT(stat);
  TSAN_MAYBE_INTERCEPT___XSTAT;
  TSAN_MAYBE_INTERCEPT_STAT64;
  TSAN_MAYBE_INTERCEPT___XSTAT64;
  TSAN_INTERCEPT(lstat);
  TSAN_MAYBE_INTERCEPT___LXSTAT;
  TSAN_MAYBE_INTERCEPT_LSTAT64;
  TSAN_MAYBE_INTERCEPT___LXSTAT64;
  TSAN_INTERCEPT(fstat);
  TSAN_MAYBE_INTERCEPT___FXSTAT;
  TSAN_MAYBE_INTERCEPT_FSTAT64;
  TSAN_MAYBE_INTERCEPT___FXSTAT64;
  TSAN_INTERCEPT(open);
  TSAN_MAYBE_INTERCEPT_OPEN64;
  TSAN_INTERCEPT(creat);
  TSAN_MAYBE_INTERCEPT_CREAT64;
  TSAN_INTERCEPT(dup);
  TSAN_INTERCEPT(dup2);
  TSAN_INTERCEPT(dup3);
  TSAN_MAYBE_INTERCEPT_EVENTFD;
  TSAN_MAYBE_INTERCEPT_SIGNALFD;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
  TSAN_INTERCEPT(socket);
  TSAN_INTERCEPT(socketpair);
  TSAN_INTERCEPT(connect);
  TSAN_INTERCEPT(bind);
  TSAN_INTERCEPT(listen);
  TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
  TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
  TSAN_INTERCEPT(close);
  TSAN_MAYBE_INTERCEPT___CLOSE;
  TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
  TSAN_INTERCEPT(pipe);
  TSAN_INTERCEPT(pipe2);

  TSAN_INTERCEPT(send);
  TSAN_INTERCEPT(sendmsg);
  TSAN_INTERCEPT(recv);

  TSAN_INTERCEPT(unlink);
  TSAN_INTERCEPT(tmpfile);
  TSAN_MAYBE_INTERCEPT_TMPFILE64;
  TSAN_INTERCEPT(fread);
  TSAN_INTERCEPT(fwrite);
  TSAN_INTERCEPT(abort);
  TSAN_INTERCEPT(puts);
  TSAN_INTERCEPT(rmdir);
  TSAN_INTERCEPT(closedir);

  TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
  TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;

  TSAN_INTERCEPT(sigaction);
  TSAN_INTERCEPT(signal);
  TSAN_INTERCEPT(sigsuspend);
  TSAN_INTERCEPT(raise);
  TSAN_INTERCEPT(kill);
  TSAN_INTERCEPT(pthread_kill);
  TSAN_INTERCEPT(sleep);
  TSAN_INTERCEPT(usleep);
  TSAN_INTERCEPT(nanosleep);
  TSAN_INTERCEPT(gettimeofday);
  TSAN_INTERCEPT(getaddrinfo);

  TSAN_INTERCEPT(fork);
  TSAN_INTERCEPT(vfork);
#if !SANITIZER_ANDROID
  TSAN_INTERCEPT(dl_iterate_phdr);
#endif
  TSAN_INTERCEPT(on_exit);
  TSAN_INTERCEPT(__cxa_atexit);
  TSAN_INTERCEPT(_exit);

#if !SANITIZER_MAC && !SANITIZER_ANDROID
  // Need to set it up, because interceptors check that the function is
  // resolved. But atexit is emitted directly into the module, so it can't
  // be resolved.
  REAL(atexit) = (int(*)(void(*)()))unreachable;
#endif

  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
    Printf("ThreadSanitizer: failed to setup atexit callback\n");
    Die();
  }

#if !SANITIZER_MAC
  if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
    Printf("ThreadSanitizer: failed to create thread key\n");
    Die();
  }
#endif

  FdInit();
}

}  // namespace __tsan

// Invisible barrier for tests.
// There were several unsuccessful iterations for this functionality:
// 1. Initially it was implemented in user code using
//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
//    MacOS. Futexes are linux-specific, for that matter.
// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
//    "as-if synchronized via sleep" messages in reports which failed some
//    output tests.
// 3. Then we switched to atomics+sched_yield. But this produced tons of
//    tsan-visible events, which led to "failed to restore stack trace"
//    failures.
// Note that the no_sanitize_thread attribute does not turn off atomic
// interception, so attaching it to a function defined in user code does not
// help.
// That's why we now have what we have.
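// Barrier layout: bits 0..7 hold the thread count, the higher bits count the
// waits performed so far, and epoch = waits / count. Worked example with
// count == 2: the word starts at 0x002 (epoch 0); after both threads have
// called __tsan_testonly_barrier_wait() it is 0x202 (waits == 2, epoch 1),
// so each spinning thread observes the epoch change and returns.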
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
  if (count >= (1 << 8)) {
    Printf("barrier_init: count is too large (%d)\n", count);
    Die();
  }
  // The 8 least significant bits hold the thread count; the remaining bits
  // count the threads that have entered the barrier.
  *barrier = count;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_testonly_barrier_wait(u64 *barrier) {
  unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
  unsigned old_epoch = (old >> 8) / (old & 0xff);
  for (;;) {
    unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
    unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
    if (cur_epoch != old_epoch)
      return;
    internal_sched_yield();
  }
}