1 /* 2 This file is part of drd, a thread error detector. 3 4 Copyright (C) 2006-2013 Bart Van Assche <bvanassche (at) acm.org>. 5 6 This program is free software; you can redistribute it and/or 7 modify it under the terms of the GNU General Public License as 8 published by the Free Software Foundation; either version 2 of the 9 License, or (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, but 12 WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program; if not, write to the Free Software 18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 19 02111-1307, USA. 20 21 The GNU General Public License is contained in the file COPYING. 22 */ 23 24 25 #include "drd_barrier.h" 26 #include "drd_clientobj.h" 27 #include "drd_clientreq.h" 28 #include "drd_cond.h" 29 #include "drd_error.h" 30 #include "drd_hb.h" 31 #include "drd_load_store.h" 32 #include "drd_malloc_wrappers.h" 33 #include "drd_mutex.h" 34 #include "drd_rwlock.h" 35 #include "drd_segment.h" 36 #include "drd_semaphore.h" 37 #include "drd_suppression.h" 38 #include "drd_thread.h" 39 #include "libvex_guest_offsets.h" 40 #include "pub_drd_bitmap.h" 41 #include "pub_tool_vki.h" // Must be included before pub_tool_libcproc 42 #include "pub_tool_basics.h" 43 #include "pub_tool_debuginfo.h" // VG_(describe_IP)() 44 #include "pub_tool_libcassert.h" // tl_assert() 45 #include "pub_tool_libcbase.h" // VG_(strcmp) 46 #include "pub_tool_libcprint.h" // VG_(printf) 47 #include "pub_tool_libcproc.h" 48 #include "pub_tool_machine.h" 49 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)() 50 #include "pub_tool_options.h" // command line options 51 #include "pub_tool_replacemalloc.h" 52 #include "pub_tool_threadstate.h" // VG_(get_running_tid)() 53 
#include "pub_tool_tooliface.h"
#include "pub_tool_aspacemgr.h"     // VG_(am_is_valid_for_client)


/* Local variables. */

static Bool s_print_stats;      // --drd-stats
static Bool s_var_info;         // --var-info
static Bool s_show_stack_usage; // --show-stack-usage
static Bool s_trace_alloc;      // --trace-alloc
static Bool trace_sectsuppr;    // --trace-sectsuppr


/**
 * Implement the needs_command_line_options for drd.
 */
static Bool DRD_(process_cmd_line_option)(const HChar* arg)
{
   int check_stack_accesses = -1;
   int join_list_vol = -1;
   int exclusive_threshold_ms = -1;
   int first_race_only = -1;
   int report_signal_unlocked = -1;
   int segment_merging = -1;
   int segment_merge_interval = -1;
   int shared_threshold_ms = -1;
   int show_confl_seg = -1;
   int trace_barrier = -1;
   int trace_clientobj = -1;
   int trace_cond = -1;
   int trace_csw = -1;
   int trace_fork_join = -1;
   int trace_hb = -1;
   int trace_conflict_set = -1;
   int trace_conflict_set_bm = -1;
   int trace_mutex = -1;
   int trace_rwlock = -1;
   int trace_segment = -1;
   int trace_semaphore = -1;
   int trace_suppression = -1;
   const HChar* trace_address = 0;
   const HChar* ptrace_address= 0;

   if VG_BOOL_CLO(arg, "--check-stack-var",     check_stack_accesses) {}
   else if VG_INT_CLO (arg, "--join-list-vol",  join_list_vol) {}
   else if VG_BOOL_CLO(arg, "--drd-stats",      s_print_stats) {}
   else if VG_BOOL_CLO(arg, "--first-race-only", first_race_only) {}
   else if VG_BOOL_CLO(arg, "--free-is-write",  DRD_(g_free_is_write)) {}
   else if VG_BOOL_CLO(arg,"--report-signal-unlocked",report_signal_unlocked)
   {}
   else if VG_BOOL_CLO(arg, "--segment-merging", segment_merging) {}
   else if VG_INT_CLO (arg, "--segment-merging-interval", segment_merge_interval)
   {}
   else if VG_BOOL_CLO(arg, "--show-confl-seg", show_confl_seg) {}
   else if VG_BOOL_CLO(arg, "--show-stack-usage", s_show_stack_usage) {}
   else if VG_BOOL_CLO(arg, "--trace-alloc",    s_trace_alloc) {}
   else if VG_BOOL_CLO(arg, "--trace-barrier",  trace_barrier) {}
   else if VG_BOOL_CLO(arg, "--trace-clientobj", trace_clientobj) {}
   else if VG_BOOL_CLO(arg, "--trace-cond",     trace_cond) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set", trace_conflict_set) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set-bm", trace_conflict_set_bm){}
   else if VG_BOOL_CLO(arg, "--trace-csw",      trace_csw) {}
   else if VG_BOOL_CLO(arg, "--trace-fork-join", trace_fork_join) {}
   else if VG_BOOL_CLO(arg, "--trace-hb",       trace_hb) {}
   else if VG_BOOL_CLO(arg, "--trace-mutex",    trace_mutex) {}
   else if VG_BOOL_CLO(arg, "--trace-rwlock",   trace_rwlock) {}
   else if VG_BOOL_CLO(arg, "--trace-sectsuppr",trace_sectsuppr) {}
   else if VG_BOOL_CLO(arg, "--trace-segment",  trace_segment) {}
   else if VG_BOOL_CLO(arg, "--trace-semaphore",trace_semaphore) {}
   else if VG_BOOL_CLO(arg, "--trace-suppr",    trace_suppression) {}
   else if VG_BOOL_CLO(arg, "--var-info",       s_var_info) {}
   else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
   else if VG_STR_CLO (arg, "--ptrace-addr",    ptrace_address) {}
   else if VG_INT_CLO (arg, "--shared-threshold", shared_threshold_ms) {}
   else if VG_STR_CLO (arg, "--trace-addr",     trace_address) {}
   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   /* -1 means "option not specified on the command line"; only forward
      values the user actually provided. */
   if (check_stack_accesses != -1)
      DRD_(set_check_stack_accesses)(check_stack_accesses);
   if (exclusive_threshold_ms != -1)
   {
      DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
      DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
   }
   if (first_race_only != -1)
   {
      DRD_(set_first_race_only)(first_race_only);
   }
   if (join_list_vol != -1)
      DRD_(thread_set_join_list_vol)(join_list_vol);
   if (report_signal_unlocked != -1)
   {
      DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
   }
   if (shared_threshold_ms != -1)
   {
      DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
   }
   if (segment_merging != -1)
      DRD_(thread_set_segment_merging)(segment_merging);
   if (segment_merge_interval != -1)
      DRD_(thread_set_segment_merge_interval)(segment_merge_interval);
   if (show_confl_seg != -1)
      DRD_(set_show_conflicting_segments)(show_confl_seg);
   if (trace_address) {
      const Addr addr = VG_(strtoll16)(trace_address, 0);
      DRD_(start_tracing_address_range)(addr, addr + 1, False);
   }
   if (ptrace_address) {
      /* Syntax is <address>[+<length>]; split on '+' in place. */
      char *plus = VG_(strchr)(ptrace_address, '+');
      Addr addr, length;
      if (plus)
         *plus = '\0';
      addr = VG_(strtoll16)(ptrace_address, 0);
      length = plus ? VG_(strtoll16)(plus + 1, 0) : 1;
      DRD_(start_tracing_address_range)(addr, addr + length, True);
   }
   if (trace_barrier != -1)
      DRD_(barrier_set_trace)(trace_barrier);
   if (trace_clientobj != -1)
      DRD_(clientobj_set_trace)(trace_clientobj);
   if (trace_cond != -1)
      DRD_(cond_set_trace)(trace_cond);
   if (trace_csw != -1)
      DRD_(thread_trace_context_switches)(trace_csw);
   if (trace_fork_join != -1)
      DRD_(thread_set_trace_fork_join)(trace_fork_join);
   if (trace_hb != -1)
      DRD_(hb_set_trace)(trace_hb);
   if (trace_conflict_set != -1)
      DRD_(thread_trace_conflict_set)(trace_conflict_set);
   if (trace_conflict_set_bm != -1)
      DRD_(thread_trace_conflict_set_bm)(trace_conflict_set_bm);
   if (trace_mutex != -1)
      DRD_(mutex_set_trace)(trace_mutex);
   if (trace_rwlock != -1)
      DRD_(rwlock_set_trace)(trace_rwlock);
   if (trace_segment != -1)
      DRD_(sg_set_trace)(trace_segment);
   if (trace_semaphore != -1)
      DRD_(semaphore_set_trace)(trace_semaphore);
   if (trace_suppression != -1)
      DRD_(suppression_set_trace)(trace_suppression);

   return True;
}

/** Print the non-debug command-line options of DRD. */
static void DRD_(print_usage)(void)
{
   VG_(printf)(
"    --check-stack-var=yes|no  Whether or not to report data races on\n"
"                              stack variables [no].\n"
"    --exclusive-threshold=<n> Print an error message if any mutex or\n"
"                              writer lock is held longer than the specified\n"
"                              time (in milliseconds) [off].\n"
"    --first-race-only=yes|no  Only report the first data race that occurs on\n"
"                              a memory location instead of all races [no].\n"
"    --free-is-write=yes|no    Whether to report races between freeing memory\n"
"                              and subsequent accesses of that memory[no].\n"
"    --join-list-vol=<n>       Number of threads to delay cleanup for [10].\n"
"    --report-signal-unlocked=yes|no Whether to report calls to\n"
"                              pthread_cond_signal() where the mutex associated\n"
"                              with the signal via pthread_cond_wait() is not\n"
"                              locked at the time the signal is sent [yes].\n"
"    --segment-merging=yes|no  Controls segment merging [yes].\n"
"        Segment merging is an algorithm to limit memory usage of the\n"
"        data race detection algorithm. Disabling segment merging may\n"
"        improve the accuracy of the so-called 'other segments' displayed\n"
"        in race reports but can also trigger an out of memory error.\n"
"    --segment-merging-interval=<n> Perform segment merging every time n new\n"
"        segments have been created. Default: %d.\n"
"    --shared-threshold=<n>    Print an error message if a reader lock\n"
"                              is held longer than the specified time (in\n"
"                              milliseconds) [off]\n"
"    --show-confl-seg=yes|no   Show conflicting segments in race reports [yes].\n"
"    --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n"
"\n"
"  drd options for monitoring process behavior:\n"
"    --ptrace-addr=<address>[+<length>] Trace all load and store activity for\n"
"                              the specified address range and keep doing that\n"
"                              even after the memory at that address has been\n"
"                              freed and reallocated [off].\n"
"    --trace-addr=<address>    Trace all load and store activity for the\n"
"                              specified address [off].\n"
"    --trace-alloc=yes|no      Trace all memory allocations and deallocations\n"
"                              [no].\n"
"    --trace-barrier=yes|no    Trace all barrier activity [no].\n"
"    --trace-cond=yes|no       Trace all condition variable activity [no].\n"
"    --trace-fork-join=yes|no  Trace all thread fork/join activity [no].\n"
"    --trace-hb=yes|no         Trace ANNOTATE_HAPPENS_BEFORE() etc. [no].\n"
"    --trace-mutex=yes|no      Trace all mutex activity [no].\n"
"    --trace-rwlock=yes|no     Trace all reader-writer lock activity[no].\n"
"    --trace-semaphore=yes|no  Trace all semaphore activity [no].\n",
DRD_(thread_get_segment_merge_interval)()
);
}

/** Print the debug-only command-line options of DRD. */
static void DRD_(print_debug_usage)(void)
{
   VG_(printf)(
"    --drd-stats=yes|no        Print statistics about DRD activity [no].\n"
"    --trace-clientobj=yes|no  Trace all client object activity [no].\n"
"    --trace-csw=yes|no        Trace all scheduler context switches [no].\n"
"    --trace-conflict-set=yes|no Trace all conflict set updates [no].\n"
"    --trace-conflict-set-bm=yes|no Trace all conflict set bitmap\n"
"                              updates [no]. Note: enabling this option\n"
"                              will generate a lot of output !\n"
"    --trace-sectsuppr=yes|no  Trace which the dynamic library sections on\n"
"                              which data race detection is suppressed.\n"
"    --trace-segment=yes|no    Trace segment actions [no].\n"
"    --trace-suppr=yes|no      Trace all address suppression actions [no].\n"
);
}


//
// Implements the thread-related core callbacks.
//

/** Called by the core before it reads client memory [a, a+size[. */
static void drd_pre_mem_read(const CorePart part,
                             const ThreadId tid,
                             const HChar* const s,
                             const Addr a,
                             const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
   {
      DRD_(trace_load)(a, size);
   }
}

/**
 * Called by the core before it reads a NUL-terminated string at address a
 * in client memory.
 */
static void drd_pre_mem_read_asciiz(const CorePart part,
                                    const ThreadId tid,
                                    const HChar* const s,
                                    const Addr a)
{
   const HChar* p = (void*)a;
   SizeT size = 0;

   // Don't segfault if the string starts in an obviously stupid
   // place. Actually we should check the whole string, not just
   // the start address, but that's too much trouble. At least
   // checking the first byte is better than nothing. See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;

   /* Note: the expression '*p' reads client memory and may crash if the */
   /* client provided an invalid pointer !                               */
   while (*p)
   {
      p++;
      size++;
   }
   if (size > 0)
   {
      DRD_(trace_load)(a, size);
   }
}

/** Called by the core after it has written client memory [a, a+size[. */
static void drd_post_mem_write(const CorePart part,
                               const ThreadId tid,
                               const Addr a,
                               const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
   {
      DRD_(trace_store)(a, size);
   }
}

/**
 * Tell DRD that the memory range [a1, a1+len[ has come into use, e.g.
 * because it has been allocated or because a stack grew.
 */
static __inline__
void drd_start_using_mem(const Addr a1, const SizeT len,
                         const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   tl_assert(a1 <= a2);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Started using memory range 0x%lx + %ld%s",
                      a1, len, DRD_(running_thread_inside_pthread_create)()
                      ? " (inside pthread_create())" : "");

   if (!is_stack_mem && DRD_(g_free_is_write))
      DRD_(thread_stop_using_mem)(a1, a2);

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      DRD_(trace_mem_access)(a1, len, eStart, 0, 0);
   }

   if (UNLIKELY(DRD_(running_thread_inside_pthread_create)()))
   {
      DRD_(start_suppression)(a1, a2, "pthread_create()");
   }
}

/** Wrapper for drd_start_using_mem() with the malloc-wrapper signature. */
static void drd_start_using_mem_w_ecu(const Addr a1,
                                      const SizeT len,
                                      UInt ec_uniq)
{
   drd_start_using_mem(a1, len, False);
}

/** Wrapper for drd_start_using_mem() with the new_mem_brk signature. */
static void drd_start_using_mem_w_tid(const Addr a1,
                                      const SizeT len,
                                      ThreadId tid)
{
   drd_start_using_mem(a1, len, False);
}

/**
 * Tell DRD that the memory range [a1, a1+len[ is no longer in use, e.g.
 * because it has been freed or because a stack shrank.
 */
static __inline__
void drd_stop_using_mem(const Addr a1, const SizeT len,
                        const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   tl_assert(a1 <= a2);

   if (UNLIKELY(DRD_(any_address_is_traced)()))
      DRD_(trace_mem_access)(a1, len, eEnd, 0, 0);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Stopped using memory range 0x%lx + %ld",
                      a1, len);

   if (!is_stack_mem || DRD_(get_check_stack_accesses)())
   {
      if (is_stack_mem || !DRD_(g_free_is_write))
         DRD_(thread_stop_using_mem)(a1, a2);
      else if (DRD_(g_free_is_write))
         DRD_(trace_store)(a1, len);
      DRD_(clientobj_stop_using_mem)(a1, a2);
      DRD_(suppression_stop_using_mem)(a1, a2);
   }
}

/** Tell DRD that non-stack memory [a1, a1+len[ is no longer in use. */
static __inline__
void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len)
{
   drd_stop_using_mem(a1, len, False);
}

/**
 * Discard all information DRD has about memory accesses and client objects
 * in the specified address range.
 */
void DRD_(clean_memory)(const Addr a1, const SizeT len)
{
   const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
   drd_stop_using_mem(a1, len, is_stack_memory);
   drd_start_using_mem(a1, len, is_stack_memory);
}

/**
 * Suppress data race reports on all addresses contained in .plt, .got and
 * .got.plt sections inside the address range [ a, a + len [. The data in
 * these sections is modified by _dl_relocate_object() every time a function
 * in a shared library is called for the first time. Since the first call
 * to a function in a shared library can happen from a multithreaded context,
 * such calls can cause conflicting accesses. See also Ulrich Drepper's
 * paper "How to Write Shared Libraries" for more information about relocation
 * (http://people.redhat.com/drepper/dsohowto.pdf).
 * Note: the contents of the .got section is only modified by the MIPS resolver.
 */
static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len)
{
   const DebugInfo* di;

   if (trace_sectsuppr)
      VG_(dmsg)("Evaluating range @ 0x%lx size %ld\n", a, len);

   for (di = VG_(next_DebugInfo)(0); di; di = VG_(next_DebugInfo)(di)) {
      Addr avma;
      SizeT size;

      if (trace_sectsuppr)
	 VG_(dmsg)("Examining %s / %s\n", VG_(DebugInfo_get_filename)(di),
		   VG_(DebugInfo_get_soname)(di));

      /*
       * Suppress the race report on the libpthread global variable
       * __pthread_multiple_threads. See also
       * http://bugs.kde.org/show_bug.cgi?id=323905.
       */
      avma = VG_(DebugInfo_get_bss_avma)(di);
      size = VG_(DebugInfo_get_bss_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0 &&
          VG_(strcmp)(VG_(DebugInfo_get_soname)(di), "libpthread.so.0") == 0) {
	 if (trace_sectsuppr)
	    VG_(dmsg)("Suppressing .bss @ 0x%lx size %ld\n", avma, size);
	 tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectBSS);
	 DRD_(start_suppression)(avma, avma + size, ".bss");
      }

      avma = VG_(DebugInfo_get_plt_avma)(di);
      size = VG_(DebugInfo_get_plt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
	 if (trace_sectsuppr)
	    VG_(dmsg)("Suppressing .plt @ 0x%lx size %ld\n", avma, size);
	 tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectPLT);
	 DRD_(start_suppression)(avma, avma + size, ".plt");
      }

      avma = VG_(DebugInfo_get_gotplt_avma)(di);
      size = VG_(DebugInfo_get_gotplt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
	 if (trace_sectsuppr)
	    VG_(dmsg)("Suppressing .got.plt @ 0x%lx size %ld\n", avma, size);
	 tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectGOTPLT);
	 DRD_(start_suppression)(avma, avma + size, ".gotplt");
      }

      avma = VG_(DebugInfo_get_got_avma)(di);
      size = VG_(DebugInfo_get_got_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
	 if (trace_sectsuppr)
	    VG_(dmsg)("Suppressing .got @ 0x%lx size %ld\n", avma, size);
	 tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectGOT);
	 DRD_(start_suppression)(avma, avma + size, ".got");
      }
   }
}

/**
 * Called when a new memory mapping becomes visible to the client, e.g. at
 * startup or because of mmap(). Also sets up relocation suppressions for
 * the newly mapped range.
 */
static
void drd_start_using_mem_w_perms(const Addr a, const SizeT len,
                                 const Bool rr, const Bool ww, const Bool xx,
                                 ULong di_handle)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());

   drd_start_using_mem(a, len, False);

   DRD_(suppress_relocation_conflicts)(a, len);
}

/**
 * Called by the core when the stack of a thread grows, to indicate that
 * the addresses in range [ a, a + len [ may now be used by the client.
 * Assumption: stacks grow downward.
 */
static __inline__
void drd_start_using_mem_stack2(const DrdThreadId tid, const Addr a,
                                const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a - VG_STACK_REDZONE_SZB);
   drd_start_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                       True);
}

/** Stack-growth callback for the currently running thread. */
static __inline__
void drd_start_using_mem_stack(const Addr a, const SizeT len)
{
   drd_start_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}

/**
 * Called by the core when the stack of a thread shrinks, to indicate that
 * the addresses [ a, a + len [ are no longer accessible for the client.
 * Assumption: stacks grow downward.
 */
static __inline__
void drd_stop_using_mem_stack2(const DrdThreadId tid, const Addr a,
                               const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a + len - VG_STACK_REDZONE_SZB);
   drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                      True);
}

/** Stack-shrink callback for the currently running thread. */
static __inline__
void drd_stop_using_mem_stack(const Addr a, const SizeT len)
{
   drd_stop_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}

/**
 * Return True if and only if address a lies inside the alternate signal
 * stack of the currently running thread. The single unsigned comparison
 * checks both bounds at once.
 */
static
Bool on_alt_stack(const Addr a)
{
   ThreadId vg_tid;
   Addr alt_min;
   SizeT alt_size;

   vg_tid = VG_(get_running_tid)();
   alt_min = VG_(thread_get_altstack_min)(vg_tid);
   alt_size = VG_(thread_get_altstack_size)(vg_tid);
   return (SizeT)(a - alt_min) < alt_size;
}

/** Stack-growth callback used while a handler runs on the alternate stack. */
static
void drd_start_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_start_using_mem_stack(a, len);
}

/** Stack-shrink callback used while a handler runs on the alternate stack. */
static
void drd_stop_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_stop_using_mem_stack(a, len);
}

/**
 * Callback function invoked by the Valgrind core before a signal is delivered.
 */
static
void drd_pre_deliver_signal(const ThreadId vg_tid, const Int sigNo,
                            const Bool alt_stack)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, alt_stack);
   if (alt_stack)
   {
      /*
       * As soon a signal handler has been invoked on the alternate stack,
       * switch to stack memory handling functions that can handle the
       * alternate stack.
       */
      VG_(track_new_mem_stack)(drd_start_using_mem_alt_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_alt_stack);
   }
}

/**
 * Callback function invoked by the Valgrind core after a signal is delivered,
 * at least if the signal handler did not longjmp().
 */
static
void drd_post_deliver_signal(const ThreadId vg_tid, const Int sigNo)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, False);
   if (DRD_(thread_get_threads_on_alt_stack)() == 0)
   {
      VG_(track_new_mem_stack)(drd_start_using_mem_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_stack);
   }
}
612 */ 613 static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len, 614 ThreadId tid) 615 { 616 DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)()); 617 drd_start_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB, 618 True); 619 } 620 621 static void drd_stop_using_mem_stack_signal(Addr a, SizeT len) 622 { 623 drd_stop_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB, 624 True); 625 } 626 627 static 628 void drd_pre_thread_create(const ThreadId creator, const ThreadId created) 629 { 630 const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator); 631 tl_assert(created != VG_INVALID_THREADID); 632 DRD_(thread_pre_create)(drd_creator, created); 633 if (DRD_(IsValidDrdThreadId)(drd_creator)) 634 { 635 DRD_(thread_new_segment)(drd_creator); 636 } 637 if (DRD_(thread_get_trace_fork_join)()) 638 { 639 DRD_(trace_msg)("drd_pre_thread_create creator = %d, created = %d", 640 drd_creator, created); 641 } 642 } 643 644 /** 645 * Called by Valgrind's core before any loads or stores are performed on 646 * the context of thread "created". 647 */ 648 static 649 void drd_post_thread_create(const ThreadId vg_created) 650 { 651 DrdThreadId drd_created; 652 Addr stack_max; 653 654 tl_assert(vg_created != VG_INVALID_THREADID); 655 656 drd_created = DRD_(thread_post_create)(vg_created); 657 658 /* Set up red zone before the code in glibc's clone.S is run. */ 659 stack_max = DRD_(thread_get_stack_max)(drd_created); 660 drd_start_using_mem_stack2(drd_created, stack_max, 0); 661 662 if (DRD_(thread_get_trace_fork_join)()) 663 { 664 DRD_(trace_msg)("drd_post_thread_create created = %d", drd_created); 665 } 666 if (! DRD_(get_check_stack_accesses)()) 667 { 668 DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created) 669 - DRD_(thread_get_stack_size)(drd_created), 670 DRD_(thread_get_stack_max)(drd_created), 671 "stack"); 672 } 673 } 674 675 /* Called after a thread has performed its last memory access. 
*/ 676 static void drd_thread_finished(ThreadId vg_tid) 677 { 678 DrdThreadId drd_tid; 679 680 /* 681 * Ignore if invoked because thread creation failed. See e.g. 682 * coregrind/m_syswrap/syswrap-amd64-linux.c 683 */ 684 if (VG_(get_running_tid)() != vg_tid) 685 return; 686 687 drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid); 688 tl_assert(drd_tid != DRD_INVALID_THREADID); 689 if (DRD_(thread_get_trace_fork_join)()) 690 { 691 DRD_(trace_msg)("drd_thread_finished tid = %d%s", drd_tid, 692 DRD_(thread_get_joinable)(drd_tid) 693 ? "" : " (which is a detached thread)"); 694 } 695 if (s_show_stack_usage && !VG_(clo_xml)) { 696 const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid); 697 const SizeT used_stack 698 = (DRD_(thread_get_stack_max)(drd_tid) 699 - DRD_(thread_get_stack_min_min)(drd_tid)); 700 VG_(message)(Vg_UserMsg, 701 "thread %d%s finished and used %ld bytes out of %ld" 702 " on its stack. Margin: %ld bytes.\n", 703 drd_tid, 704 DRD_(thread_get_joinable)(drd_tid) 705 ? "" : " (which is a detached thread)", 706 used_stack, stack_size, stack_size - used_stack); 707 708 } 709 drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid), 710 DRD_(thread_get_stack_max)(drd_tid) 711 - DRD_(thread_get_stack_min)(drd_tid), 712 True); 713 DRD_(thread_set_record_loads)(drd_tid, False); 714 DRD_(thread_set_record_stores)(drd_tid, False); 715 DRD_(thread_finished)(drd_tid); 716 } 717 718 /* 719 * Called immediately after fork for the child process only. 'tid' is the 720 * only surviving thread in the child process. Cleans up thread state. 721 * See also http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html for a detailed discussion of using fork() in combination with mutexes. 722 */ 723 static 724 void drd__atfork_child(ThreadId tid) 725 { 726 DRD_(drd_thread_atfork_child)(tid); 727 } 728 729 730 // 731 // Implementation of the tool interface. 
732 // 733 734 static void DRD_(post_clo_init)(void) 735 { 736 #if defined(VGO_linux) || defined(VGO_darwin) 737 /* fine */ 738 #else 739 VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n"); 740 # endif 741 742 if (s_var_info) 743 { 744 VG_(needs_var_info)(); 745 } 746 } 747 748 static void drd_start_client_code(const ThreadId tid, const ULong bbs_done) 749 { 750 tl_assert(tid == VG_(get_running_tid)()); 751 DRD_(thread_set_vg_running_tid)(tid); 752 } 753 754 static void DRD_(fini)(Int exitcode) 755 { 756 // thread_print_all(); 757 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) { 758 VG_(message)(Vg_UserMsg, "For counts of detected and suppressed errors, " 759 "rerun with: -v\n"); 760 } 761 762 if ((VG_(clo_stats) || s_print_stats) && !VG_(clo_xml)) 763 { 764 ULong pu = DRD_(thread_get_update_conflict_set_count)(); 765 ULong pu_seg_cr = DRD_(thread_get_update_conflict_set_new_sg_count)(); 766 ULong pu_mtx_cv = DRD_(thread_get_update_conflict_set_sync_count)(); 767 ULong pu_join = DRD_(thread_get_update_conflict_set_join_count)(); 768 769 VG_(message)(Vg_UserMsg, 770 " thread: %lld context switches.\n", 771 DRD_(thread_get_context_switch_count)()); 772 VG_(message)(Vg_UserMsg, 773 "confl set: %lld full updates and %lld partial updates;\n", 774 DRD_(thread_get_compute_conflict_set_count)(), 775 pu); 776 VG_(message)(Vg_UserMsg, 777 " %lld partial updates during segment creation,\n", 778 pu_seg_cr); 779 VG_(message)(Vg_UserMsg, 780 " %lld because of mutex/sema/cond.var. 
operations,\n", 781 pu_mtx_cv); 782 VG_(message)(Vg_UserMsg, 783 " %lld because of barrier/rwlock operations and\n", 784 pu - pu_seg_cr - pu_mtx_cv - pu_join); 785 VG_(message)(Vg_UserMsg, 786 " %lld partial updates because of thread join" 787 " operations.\n", 788 pu_join); 789 VG_(message)(Vg_UserMsg, 790 " segments: created %lld segments, max %lld alive,\n", 791 DRD_(sg_get_segments_created_count)(), 792 DRD_(sg_get_max_segments_alive_count)()); 793 VG_(message)(Vg_UserMsg, 794 " %lld discard points and %lld merges.\n", 795 DRD_(thread_get_discard_ordered_segments_count)(), 796 DRD_(sg_get_segment_merge_count)()); 797 VG_(message)(Vg_UserMsg, 798 "segmnt cr: %lld mutex, %lld rwlock, %lld semaphore and" 799 " %lld barrier.\n", 800 DRD_(get_mutex_segment_creation_count)(), 801 DRD_(get_rwlock_segment_creation_count)(), 802 DRD_(get_semaphore_segment_creation_count)(), 803 DRD_(get_barrier_segment_creation_count)()); 804 VG_(message)(Vg_UserMsg, 805 " bitmaps: %lld level one" 806 " and %lld level two bitmaps were allocated.\n", 807 DRD_(bm_get_bitmap_creation_count)(), 808 DRD_(bm_get_bitmap2_creation_count)()); 809 VG_(message)(Vg_UserMsg, 810 " mutex: %lld non-recursive lock/unlock events.\n", 811 DRD_(get_mutex_lock_count)()); 812 DRD_(print_malloc_stats)(); 813 } 814 815 DRD_(bm_module_cleanup)(); 816 } 817 818 static 819 void drd_pre_clo_init(void) 820 { 821 // Basic tool stuff. 822 VG_(details_name) ("drd"); 823 VG_(details_version) (NULL); 824 VG_(details_description) ("a thread error detector"); 825 VG_(details_copyright_author)("Copyright (C) 2006-2013, and GNU GPL'd," 826 " by Bart Van Assche."); 827 VG_(details_bug_reports_to) (VG_BUGS_TO); 828 829 VG_(basic_tool_funcs) (DRD_(post_clo_init), 830 DRD_(instrument), 831 DRD_(fini)); 832 833 // Command line stuff. 834 VG_(needs_command_line_options)(DRD_(process_cmd_line_option), 835 DRD_(print_usage), 836 DRD_(print_debug_usage)); 837 VG_(needs_xml_output) (); 838 839 // Error handling. 
840 DRD_(register_error_handlers)(); 841 842 // Core event tracking. 843 VG_(track_pre_mem_read) (drd_pre_mem_read); 844 VG_(track_pre_mem_read_asciiz) (drd_pre_mem_read_asciiz); 845 VG_(track_post_mem_write) (drd_post_mem_write); 846 VG_(track_new_mem_brk) (drd_start_using_mem_w_tid); 847 VG_(track_new_mem_mmap) (drd_start_using_mem_w_perms); 848 VG_(track_new_mem_stack) (drd_start_using_mem_stack); 849 VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal); 850 VG_(track_new_mem_startup) (drd_start_using_mem_w_perms); 851 VG_(track_die_mem_brk) (drd_stop_using_nonstack_mem); 852 VG_(track_die_mem_munmap) (drd_stop_using_nonstack_mem); 853 VG_(track_die_mem_stack) (drd_stop_using_mem_stack); 854 VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal); 855 VG_(track_pre_deliver_signal) (drd_pre_deliver_signal); 856 VG_(track_post_deliver_signal) (drd_post_deliver_signal); 857 VG_(track_start_client_code) (drd_start_client_code); 858 VG_(track_pre_thread_ll_create) (drd_pre_thread_create); 859 VG_(track_pre_thread_first_insn)(drd_post_thread_create); 860 VG_(track_pre_thread_ll_exit) (drd_thread_finished); 861 VG_(atfork) (NULL/*pre*/, NULL/*parent*/, 862 drd__atfork_child/*child*/); 863 864 // Other stuff. 865 DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu, 866 drd_stop_using_nonstack_mem); 867 868 DRD_(bm_module_init)(); 869 870 DRD_(clientreq_init)(); 871 872 DRD_(suppression_init)(); 873 874 DRD_(clientobj_init)(); 875 876 DRD_(thread_init)(); 877 878 { 879 HChar* const smi = VG_(getenv)("DRD_SEGMENT_MERGING_INTERVAL"); 880 if (smi) 881 DRD_(thread_set_segment_merge_interval)(VG_(strtoll10)(smi, NULL)); 882 } 883 } 884 885 886 VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init) 887