//===-- tsan_platform_linux.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

namespace __tsan {

static uptr g_data_start;
static uptr g_data_end;

enum {
  MemTotal  = 0,
  MemShadow = 1,
  MemMeta   = 2,
  MemFile   = 3,
  MemMmap   = 4,
  MemTrace  = 5,
  MemHeap   = 6,
  MemOther  = 7,
  MemCount  = 8,
};

void FillProfileCallback(uptr p, uptr rss, bool file,
                         uptr *mem, uptr stats_size) {
  mem[MemTotal] += rss;
  if (p >= kShadowBeg && p < kShadowEnd)
    mem[MemShadow] += rss;
  else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
    mem[MemMeta] += rss;
#ifndef SANITIZER_GO
  else if (p >= kHeapMemBeg && p < kHeapMemEnd)
    mem[MemHeap] += rss;
  else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
    mem[file ? MemFile : MemMmap] += rss;
#else
  else if (p >= kAppMemBeg && p < kAppMemEnd)
    mem[file ? MemFile : MemMmap] += rss;
#endif
  else if (p >= kTraceMemBeg && p < kTraceMemEnd)
    mem[MemTrace] += rss;
  else
    mem[MemOther] += rss;
}

void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr mem[MemCount] = {};
  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
  StackDepotStats *stacks = StackDepotGetStats();
  internal_snprintf(buf, buf_size,
      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
      mem[MemHeap] >> 20, mem[MemOther] >> 20,
      stacks->allocated >> 20, stacks->n_uniq_ids,
      nlive, nthread);
}
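
// Illustrative only (numbers made up, not produced verbatim by this file):
// with the format string above, one profile line could read
//   RSS 420 MB: shadow:256 meta:32 file:8 mmap:64 trace:16 heap:40 other:4
//   stacks=2[1137] nthr=5/7
// i.e. stacks=<depot MB>[<unique stack ids>] and nthr=<live>/<total> threads.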
#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
}
#endif

void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}

#ifndef SANITIZER_GO
static void ProtectRange(uptr beg, uptr end) {
  CHECK_LE(beg, end);
  if (beg == end)
    return;
  if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
    Printf("FATAL: Make sure you are not using unlimited stack\n");
    Die();
  }
}

// Mark the shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create a temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalScopedBuffer<u64> marker(kMarkerSize);
  // 'volatile' prevents the compiler from replacing this loop with memset.
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size());
  // Map the file into memory.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into the shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end, offset, prot;
  // Reusing the buffer 'name'.
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
    if (name[0] != 0 && name[0] != '['
        && (prot & MemoryMappingLayout::kProtectionRead)
        && (prot & MemoryMappingLayout::kProtectionExecute)
        && !(prot & MemoryMappingLayout::kProtectionWrite)
        && IsAppMem(start)) {
      // Assume it's .rodata.
      char *shadow_start = (char*)MemToShadow(start);
      char *shadow_end = (char*)MemToShadow(end);
      for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
        internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}
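
// Illustrative sketch, not part of the runtime: after MapRodata(), reading
// the shadow of a read-only global yields the marker, e.g.
//   u64 s = *(u64*)MemToShadow(rodata_addr);  // rodata_addr: hypothetical
//   // s == kShadowRodata => the fast path can skip race checking here.
// Because every such shadow page is a MAP_PRIVATE view of one temp file,
// all .rodata shadow shares physical memory for as long as it stays unwritten.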
void InitializeShadowMemory() {
  // Map memory shadow.
  uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
    kShadowEnd - kShadowBeg);
  if (shadow != kShadowBeg) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
    Die();
  }
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of its stack, and similarly
  // a program uses only a small part of a large mmap. On some programs
  // we see a 20% memory usage reduction from disabling huge pages for this
  // range.
  // FIXME: don't use constants here.
#if defined(__x86_64__)
  const uptr kMadviseRangeBeg  = 0x7f0000000000ull;
  const uptr kMadviseRangeSize = 0x010000000000ull;
#elif defined(__mips64)
  const uptr kMadviseRangeBeg  = 0xff00000000ull;
  const uptr kMadviseRangeSize = 0x0100000000ull;
#endif
  NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
                      kMadviseRangeSize * kShadowMultiplier);
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      kShadowBeg, kShadowEnd,
      (kShadowEnd - kShadowBeg) >> 30);

  // Map meta shadow.
  uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
  uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size);
  if (meta != kMetaShadowBeg) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
    Die();
  }
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(meta, meta_size);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      meta, meta + meta_size, meta_size >> 30);

  MapRodata();
}

static void InitDataSeg() {
  MemoryMappingLayout proc_maps(true);
  uptr start, end, offset;
  char name[128];
#if SANITIZER_FREEBSD
  // On FreeBSD BSS is usually the last block allocated within the
  // low range and heap is the last block allocated within the range
  // 0x800000000-0x8ffffffff.
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
                        /*protection*/ 0)) {
    DPrintf("%p-%p %p %s\n", start, end, offset, name);
    if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
        name[0] == '\0') {
      g_data_start = start;
      g_data_end = end;
    }
  }
#else
  bool prev_is_data = false;
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
                        /*protection*/ 0)) {
    DPrintf("%p-%p %p %s\n", start, end, offset, name);
    bool is_data = offset != 0 && name[0] != 0;
    // BSS may get merged with [heap] in /proc/self/maps. This is not very
    // reliable.
    bool is_bss = offset == 0 &&
      (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
    if (g_data_start == 0 && is_data)
      g_data_start = start;
    if (is_bss)
      g_data_end = end;
    prev_is_data = is_data;
  }
#endif
  DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
  CHECK_LT(g_data_start, g_data_end);
  CHECK_GE((uptr)&g_data_start, g_data_start);
  CHECK_LT((uptr)&g_data_start, g_data_end);
}
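
// Illustrative /proc/self/maps excerpt (addresses and path made up) showing
// what the Linux heuristic above keys on:
//   00600000-00601000 rw-p 00001000 08:01 123  /bin/app  <- offset!=0, named: data
//   00601000-00623000 rw-p 00000000 00:00 0    [heap]    <- offset==0 after data: bss
// g_data_start is set at the first such data mapping, g_data_end at the last
// bss/[heap] block, and IsGlobalVar() below treats [g_data_start, g_data_end)
// as the global data segment.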
static void CheckAndProtect() {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  uptr p, end;
  while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
    if (IsAppMem(p))
      continue;
    if (p >= kHeapMemEnd &&
        p < HeapEnd())
      continue;
    if (p >= kVdsoBeg)  // vdso
      break;
    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
    Die();
  }

  ProtectRange(kLoAppMemEnd, kShadowBeg);
  ProtectRange(kShadowEnd, kMetaShadowBeg);
  ProtectRange(kMetaShadowEnd, kTraceMemBeg);
  // Memory for traces is mapped lazily in MapThreadTrace.
  // Protect the whole range for now, so that the user does not map something
  // here.
  ProtectRange(kTraceMemBeg, kTraceMemEnd);
  ProtectRange(kTraceMemEnd, kHeapMemBeg);
  ProtectRange(HeapEnd(), kHiAppMemBeg);
}
#endif  // #ifndef SANITIZER_GO

void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with a limited address space.
  // An unlimited stack is not a problem either, because the executable
  // is not compiled with -pie.
  if (kCppMode) {
    bool reexec = false;
    // TSan doesn't play well with an unlimited stack size (as the stack
    // overlaps with the shadow memory). If we detect an unlimited stack size,
    // we re-exec the program with a limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
    if (reexec)
      ReExec();
  }

#ifndef SANITIZER_GO
  CheckAndProtect();
  InitTlsSize();
  InitDataSeg();
#endif
}

bool IsGlobalVar(uptr addr) {
  return g_data_start && addr >= g_data_start && addr < g_data_end;
}

#ifndef SANITIZER_GO
// Extract file descriptors passed to the glibc-internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see the
// internal closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX
  int cnt = 0;
  __res_state *statp = (__res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds;
// see 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}
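
// Illustrative sender-side sketch (hypothetical code, not part of this file)
// of the SCM_RIGHTS control message that ExtractRecvmsgFDs parses:
//   char cbuf[CMSG_SPACE(sizeof(int))] = {};
//   msghdr msg = {};
//   msg.msg_control = cbuf;
//   msg.msg_controllen = sizeof(cbuf);
//   cmsghdr *cm = CMSG_FIRSTHDR(&msg);
//   cm->cmsg_level = SOL_SOCKET;
//   cm->cmsg_type = SCM_RIGHTS;
//   cm->cmsg_len = CMSG_LEN(sizeof(int));
//   memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));  // fd_to_pass: hypothetical
//   // sendmsg() with this msghdr (plus a non-empty iovec) delivers fd_to_pass
//   // to the peer, where the recvmsg interceptor hands msg to the code above.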
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore mess of macros.
  // We can't intercept them nor call them without including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD
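
// Illustrative only (adapter and cleanup names are hypothetical; the real
// callers live in the interceptors): call_pthread_cancel_with_cleanup wraps
// cancellation points such as pthread_cond_timedwait, roughly
//   res = call_pthread_cancel_with_cleanup(
//       call_real_cond_timedwait, cond, mutex, abstime,
//       unlock_mutex_cleanup, &ctx);
// so that TSan-side cleanup still runs if the thread is cancelled mid-wait.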