1 /* -*- mode: C; c-basic-offset: 3; indent-tabs-mode: nil; -*- */ 2 /* 3 This file is part of drd, a thread error detector. 4 5 Copyright (C) 2006-2011 Bart Van Assche <bvanassche (at) acm.org>. 6 7 This program is free software; you can redistribute it and/or 8 modify it under the terms of the GNU General Public License as 9 published by the Free Software Foundation; either version 2 of the 10 License, or (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, but 13 WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, write to the Free Software 19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 20 02111-1307, USA. 21 22 The GNU General Public License is contained in the file COPYING. 23 */ 24 25 26 #include "drd_barrier.h" 27 #include "drd_clientobj.h" 28 #include "drd_clientreq.h" 29 #include "drd_cond.h" 30 #include "drd_error.h" 31 #include "drd_hb.h" 32 #include "drd_load_store.h" 33 #include "drd_malloc_wrappers.h" 34 #include "drd_mutex.h" 35 #include "drd_rwlock.h" 36 #include "drd_segment.h" 37 #include "drd_semaphore.h" 38 #include "drd_suppression.h" 39 #include "drd_thread.h" 40 #include "libvex_guest_offsets.h" 41 #include "pub_drd_bitmap.h" 42 #include "pub_tool_vki.h" // Must be included before pub_tool_libcproc 43 #include "pub_tool_basics.h" 44 #include "pub_tool_debuginfo.h" // VG_(describe_IP)() 45 #include "pub_tool_libcassert.h" // tl_assert() 46 #include "pub_tool_libcbase.h" // VG_(strcmp) 47 #include "pub_tool_libcprint.h" // VG_(printf) 48 #include "pub_tool_libcproc.h" 49 #include "pub_tool_machine.h" 50 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)() 51 #include "pub_tool_options.h" // command line options 52 #include "pub_tool_replacemalloc.h" 53 #include "pub_tool_threadstate.h" // VG_(get_running_tid)() 54 #include "pub_tool_tooliface.h" 55 #include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client) 56 57 58 /* Local variables. */ 59 60 static Bool s_print_stats; 61 static Bool s_var_info; 62 static Bool s_show_stack_usage; 63 static Bool s_trace_alloc; 64 65 66 /** 67 * Implement the needs_command_line_options for drd. 68 */ 69 static Bool DRD_(process_cmd_line_option)(Char* arg) 70 { 71 int check_stack_accesses = -1; 72 int join_list_vol = -1; 73 int exclusive_threshold_ms = -1; 74 int first_race_only = -1; 75 int report_signal_unlocked = -1; 76 int segment_merging = -1; 77 int segment_merge_interval = -1; 78 int shared_threshold_ms = -1; 79 int show_confl_seg = -1; 80 int trace_barrier = -1; 81 int trace_clientobj = -1; 82 int trace_cond = -1; 83 int trace_csw = -1; 84 int trace_fork_join = -1; 85 int trace_hb = -1; 86 int trace_conflict_set = -1; 87 int trace_conflict_set_bm = -1; 88 int trace_mutex = -1; 89 int trace_rwlock = -1; 90 int trace_segment = -1; 91 int trace_semaphore = -1; 92 int trace_suppression = -1; 93 Char* trace_address = 0; 94 95 if VG_BOOL_CLO(arg, "--check-stack-var", check_stack_accesses) {} 96 else if VG_INT_CLO (arg, "--join-list-vol", join_list_vol) {} 97 else if VG_BOOL_CLO(arg, "--drd-stats", s_print_stats) {} 98 else if VG_BOOL_CLO(arg, "--first-race-only", first_race_only) {} 99 else if VG_BOOL_CLO(arg, "--free-is-write", DRD_(g_free_is_write)) {} 100 else if VG_BOOL_CLO(arg,"--report-signal-unlocked",report_signal_unlocked) 101 {} 102 else if VG_BOOL_CLO(arg, "--segment-merging", segment_merging) {} 103 else if VG_INT_CLO (arg, "--segment-merging-interval", segment_merge_interval) 104 {} 105 else if VG_BOOL_CLO(arg, "--show-confl-seg", show_confl_seg) {} 106 else if VG_BOOL_CLO(arg, "--show-stack-usage", s_show_stack_usage) {} 107 else if VG_BOOL_CLO(arg, "--trace-alloc", s_trace_alloc) {} 108 else if VG_BOOL_CLO(arg, "--trace-barrier", trace_barrier) {} 109 else if VG_BOOL_CLO(arg, "--trace-clientobj", trace_clientobj) {} 110 else if VG_BOOL_CLO(arg, "--trace-cond", trace_cond) {} 111 else if VG_BOOL_CLO(arg, "--trace-conflict-set", trace_conflict_set) {} 112 else if VG_BOOL_CLO(arg, "--trace-conflict-set-bm", trace_conflict_set_bm){} 113 else if VG_BOOL_CLO(arg, "--trace-csw", trace_csw) {} 114 else if VG_BOOL_CLO(arg, "--trace-fork-join", trace_fork_join) {} 115 else if VG_BOOL_CLO(arg, "--trace-hb", trace_hb) {} 116 else if VG_BOOL_CLO(arg, "--trace-mutex", trace_mutex) {} 117 else if VG_BOOL_CLO(arg, "--trace-rwlock", trace_rwlock) {} 118 else if VG_BOOL_CLO(arg, "--trace-segment", trace_segment) {} 119 else if VG_BOOL_CLO(arg, "--trace-semaphore", trace_semaphore) {} 120 else if VG_BOOL_CLO(arg, "--trace-suppr", trace_suppression) {} 121 else if VG_BOOL_CLO(arg, "--var-info", s_var_info) {} 122 else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {} 123 else if VG_INT_CLO (arg, "--shared-threshold", shared_threshold_ms) {} 124 else if VG_STR_CLO (arg, "--trace-addr", trace_address) {} 125 else 126 return VG_(replacement_malloc_process_cmd_line_option)(arg); 127 128 if (check_stack_accesses != -1) 129 DRD_(set_check_stack_accesses)(check_stack_accesses); 130 if (exclusive_threshold_ms != -1) 131 { 132 DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms); 133 DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms); 134 } 135 if (first_race_only != -1) 136 { 137 DRD_(set_first_race_only)(first_race_only); 138 } 139 if (join_list_vol != -1) 140 DRD_(thread_set_join_list_vol)(join_list_vol); 141 if (report_signal_unlocked != -1) 142 { 143 DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked); 144 } 145 if (shared_threshold_ms != -1) 146 { 147 DRD_(rwlock_set_shared_threshold)(shared_threshold_ms); 148 } 149 if (segment_merging != -1) 150 DRD_(thread_set_segment_merging)(segment_merging); 151 if (segment_merge_interval != -1) 152 DRD_(thread_set_segment_merge_interval)(segment_merge_interval); 153 if (show_confl_seg != -1) 154 DRD_(set_show_conflicting_segments)(show_confl_seg); 155 if (trace_address) 156 { 157 const Addr addr = VG_(strtoll16)(trace_address, 0); 158 DRD_(start_tracing_address_range)(addr, addr + 1); 159 } 160 if (trace_barrier != -1) 161 DRD_(barrier_set_trace)(trace_barrier); 162 if (trace_clientobj != -1) 163 DRD_(clientobj_set_trace)(trace_clientobj); 164 if (trace_cond != -1) 165 DRD_(cond_set_trace)(trace_cond); 166 if (trace_csw != -1) 167 DRD_(thread_trace_context_switches)(trace_csw); 168 if (trace_fork_join != -1) 169 DRD_(thread_set_trace_fork_join)(trace_fork_join); 170 if (trace_hb != -1) 171 DRD_(hb_set_trace)(trace_hb); 172 if (trace_conflict_set != -1) 173 DRD_(thread_trace_conflict_set)(trace_conflict_set); 174 if (trace_conflict_set_bm != -1) 175 DRD_(thread_trace_conflict_set_bm)(trace_conflict_set_bm); 176 if (trace_mutex != -1) 177 DRD_(mutex_set_trace)(trace_mutex); 178 if (trace_rwlock != -1) 179 DRD_(rwlock_set_trace)(trace_rwlock); 180 if (trace_segment != -1) 181 DRD_(sg_set_trace)(trace_segment); 182 if (trace_semaphore != -1) 183 DRD_(semaphore_set_trace)(trace_semaphore); 184 if (trace_suppression != -1) 185 DRD_(suppression_set_trace)(trace_suppression); 186 187 return True; 188 } 189 190 static void DRD_(print_usage)(void) 191 { 192 VG_(printf)( 193 " --check-stack-var=yes|no Whether or not to report data races on\n" 194 " stack variables [no].\n" 195 " --exclusive-threshold=<n> Print an error message if any mutex or\n" 196 " writer lock is held longer than the specified\n" 197 " time (in milliseconds) [off].\n" 198 " --first-race-only=yes|no Only report the first data race that occurs on\n" 199 " a memory location instead of all races [no].\n" 200 " --free-is-write=yes|no Whether to report races between freeing memory\n" 201 " and subsequent accesses of that memory[no].\n" 202 " --join-list-vol=<n> Number of threads to delay cleanup for [10].\n" 203 " --report-signal-unlocked=yes|no Whether to report calls to\n" 204 " pthread_cond_signal() where the mutex associated\n" 205 " with the signal via pthread_cond_wait() is not\n" 206 " locked at the time the signal is sent [yes].\n" 207 " --segment-merging=yes|no Controls segment merging [yes].\n" 208 " Segment merging is an algorithm to limit memory usage of the\n" 209 " data race detection algorithm. Disabling segment merging may\n" 210 " improve the accuracy of the so-called 'other segments' displayed\n" 211 " in race reports but can also trigger an out of memory error.\n" 212 " --segment-merging-interval=<n> Perform segment merging every time n new\n" 213 " segments have been created. Default: %d.\n" 214 " --shared-threshold=<n> Print an error message if a reader lock\n" 215 " is held longer than the specified time (in\n" 216 " milliseconds) [off]\n" 217 " --show-confl-seg=yes|no Show conflicting segments in race reports [yes].\n" 218 " --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n" 219 "\n" 220 " drd options for monitoring process behavior:\n" 221 " --trace-addr=<address> Trace all load and store activity for the.\n" 222 " specified address [off].\n" 223 " --trace-alloc=yes|no Trace all memory allocations and deallocations\n"" [no].\n" 224 " --trace-barrier=yes|no Trace all barrier activity [no].\n" 225 " --trace-cond=yes|no Trace all condition variable activity [no].\n" 226 " --trace-fork-join=yes|no Trace all thread fork/join activity [no].\n" 227 " --trace-hb=yes|no Trace ANNOTATE_HAPPENS_BEFORE() etc. [no].\n" 228 " --trace-mutex=yes|no Trace all mutex activity [no].\n" 229 " --trace-rwlock=yes|no Trace all reader-writer lock activity[no].\n" 230 " --trace-semaphore=yes|no Trace all semaphore activity [no].\n", 231 DRD_(thread_get_segment_merge_interval)() 232 ); 233 } 234 235 static void DRD_(print_debug_usage)(void) 236 { 237 VG_(printf)( 238 " --drd-stats=yes|no Print statistics about DRD activity [no].\n" 239 " --trace-clientobj=yes|no Trace all client object activity [no].\n" 240 " --trace-csw=yes|no Trace all scheduler context switches [no].\n" 241 " --trace-conflict-set=yes|no Trace all conflict set updates [no].\n" 242 " --trace-conflict-set-bm=yes|no Trace all conflict set bitmap\n" 243 " updates [no]. Note: enabling this option\n" 244 " will generate a lot of output !\n" 245 " --trace-segment=yes|no Trace segment actions [no].\n" 246 " --trace-suppr=yes|no Trace all address suppression actions [no].\n" 247 ); 248 } 249 250 251 // 252 // Implements the thread-related core callbacks. 253 // 254 255 static void drd_pre_mem_read(const CorePart part, 256 const ThreadId tid, 257 Char* const s, 258 const Addr a, 259 const SizeT size) 260 { 261 if (size > 0) 262 { 263 DRD_(trace_load)(a, size); 264 } 265 } 266 267 static void drd_pre_mem_read_asciiz(const CorePart part, 268 const ThreadId tid, 269 Char* const s, 270 const Addr a) 271 { 272 const char* p = (void*)a; 273 SizeT size = 0; 274 275 // Don't segfault if the string starts in an obviously stupid 276 // place. Actually we should check the whole string, not just 277 // the start address, but that's too much trouble. At least 278 // checking the first byte is better than nothing. See #255009. 279 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ)) 280 return; 281 282 /* Note: the expression '*p' reads client memory and may crash if the */ 283 /* client provided an invalid pointer ! */ 284 while (*p) 285 { 286 p++; 287 size++; 288 } 289 if (size > 0) 290 { 291 DRD_(trace_load)(a, size); 292 } 293 } 294 295 static void drd_post_mem_write(const CorePart part, 296 const ThreadId tid, 297 const Addr a, 298 const SizeT size) 299 { 300 DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)()); 301 if (size > 0) 302 { 303 DRD_(trace_store)(a, size); 304 } 305 } 306 307 static __inline__ 308 void drd_start_using_mem(const Addr a1, const SizeT len, 309 const Bool is_stack_mem) 310 { 311 const Addr a2 = a1 + len; 312 313 tl_assert(a1 <= a2); 314 315 if (!is_stack_mem && s_trace_alloc) 316 DRD_(trace_msg)("Started using memory range 0x%lx + %ld%s", 317 a1, len, DRD_(running_thread_inside_pthread_create)() 318 ? " (inside pthread_create())" : ""); 319 320 if (!is_stack_mem && DRD_(g_free_is_write)) 321 DRD_(thread_stop_using_mem)(a1, a2); 322 323 if (UNLIKELY(DRD_(any_address_is_traced)())) 324 { 325 DRD_(trace_mem_access)(a1, len, eStart); 326 } 327 328 if (UNLIKELY(DRD_(running_thread_inside_pthread_create)())) 329 { 330 DRD_(start_suppression)(a1, a2, "pthread_create()"); 331 } 332 } 333 334 static void drd_start_using_mem_w_ecu(const Addr a1, 335 const SizeT len, 336 UInt ec_uniq) 337 { 338 drd_start_using_mem(a1, len, False); 339 } 340 341 static void drd_start_using_mem_w_tid(const Addr a1, 342 const SizeT len, 343 ThreadId tid) 344 { 345 drd_start_using_mem(a1, len, False); 346 } 347 348 static __inline__ 349 void drd_stop_using_mem(const Addr a1, const SizeT len, 350 const Bool is_stack_mem) 351 { 352 const Addr a2 = a1 + len; 353 354 tl_assert(a1 <= a2); 355 356 if (UNLIKELY(DRD_(any_address_is_traced)())) 357 DRD_(trace_mem_access)(a1, len, eEnd); 358 359 if (!is_stack_mem && s_trace_alloc) 360 DRD_(trace_msg)("Stopped using memory range 0x%lx + %ld", 361 a1, len); 362 363 if (!is_stack_mem || DRD_(get_check_stack_accesses)()) 364 { 365 if (is_stack_mem || !DRD_(g_free_is_write)) 366 DRD_(thread_stop_using_mem)(a1, a2); 367 else if (DRD_(g_free_is_write)) 368 DRD_(trace_store)(a1, len); 369 DRD_(clientobj_stop_using_mem)(a1, a2); 370 DRD_(suppression_stop_using_mem)(a1, a2); 371 } 372 } 373 374 static __inline__ 375 void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len) 376 { 377 drd_stop_using_mem(a1, len, False); 378 } 379 380 /** 381 * Discard all information DRD has about memory accesses and client objects 382 * in the specified address range. 383 */ 384 void DRD_(clean_memory)(const Addr a1, const SizeT len) 385 { 386 const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1); 387 drd_stop_using_mem(a1, len, is_stack_memory); 388 drd_start_using_mem(a1, len, is_stack_memory); 389 } 390 391 /** 392 * Suppress data race reports on all addresses contained in .plt and 393 * .got.plt sections inside the address range [ a, a + len [. The data in 394 * these sections is modified by _dl_relocate_object() every time a function 395 * in a shared library is called for the first time. Since the first call 396 * to a function in a shared library can happen from a multithreaded context, 397 * such calls can cause conflicting accesses. See also Ulrich Drepper's 398 * paper "How to Write Shared Libraries" for more information about relocation 399 * (http://people.redhat.com/drepper/dsohowto.pdf). 400 */ 401 static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len) 402 { 403 const DebugInfo* di; 404 405 #if 0 406 VG_(printf)("Evaluating range @ 0x%lx size %ld\n", a, len); 407 #endif 408 409 for (di = VG_(next_DebugInfo)(0); di; di = VG_(next_DebugInfo)(di)) 410 { 411 Addr avma; 412 SizeT size; 413 414 avma = VG_(DebugInfo_get_plt_avma)(di); 415 size = VG_(DebugInfo_get_plt_size)(di); 416 tl_assert((avma && size) || (avma == 0 && size == 0)); 417 if (size > 0) 418 { 419 #if 0 420 VG_(printf)("Suppressing .plt @ 0x%lx size %ld\n", avma, size); 421 #endif 422 tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectPLT); 423 DRD_(start_suppression)(avma, avma + size, ".plt"); 424 } 425 426 avma = VG_(DebugInfo_get_gotplt_avma)(di); 427 size = VG_(DebugInfo_get_gotplt_size)(di); 428 tl_assert((avma && size) || (avma == 0 && size == 0)); 429 if (size > 0) 430 { 431 #if 0 432 VG_(printf)("Suppressing .got.plt @ 0x%lx size %ld\n", avma, size); 433 #endif 434 tl_assert(VG_(DebugInfo_sect_kind)(NULL, 0, avma) == Vg_SectGOTPLT); 435 DRD_(start_suppression)(avma, avma + size, ".gotplt"); 436 } 437 } 438 } 439 440 static 441 void drd_start_using_mem_w_perms(const Addr a, const SizeT len, 442 const Bool rr, const Bool ww, const Bool xx, 443 ULong di_handle) 444 { 445 DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)()); 446 447 drd_start_using_mem(a, len, False); 448 449 DRD_(suppress_relocation_conflicts)(a, len); 450 } 451 452 /* Called by the core when the stack of a thread grows, to indicate that */ 453 /* the addresses in range [ a, a + len [ may now be used by the client. */ 454 /* Assumption: stacks grow downward. */ 455 static __inline__ 456 void drd_start_using_mem_stack(const Addr a, const SizeT len) 457 { 458 DRD_(thread_set_stack_min)(DRD_(thread_get_running_tid)(), 459 a - VG_STACK_REDZONE_SZB); 460 drd_start_using_mem(a - VG_STACK_REDZONE_SZB, 461 len + VG_STACK_REDZONE_SZB, 462 True); 463 } 464 465 /* Called by the core when the stack of a thread shrinks, to indicate that */ 466 /* the addresses [ a, a + len [ are no longer accessible for the client. */ 467 /* Assumption: stacks grow downward. */ 468 static __inline__ 469 void drd_stop_using_mem_stack(const Addr a, const SizeT len) 470 { 471 DRD_(thread_set_stack_min)(DRD_(thread_get_running_tid)(), 472 a + len - VG_STACK_REDZONE_SZB); 473 drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB, 474 True); 475 } 476 477 static 478 Bool on_alt_stack(const Addr a) 479 { 480 ThreadId vg_tid; 481 Addr alt_min; 482 SizeT alt_size; 483 484 vg_tid = VG_(get_running_tid)(); 485 alt_min = VG_(thread_get_altstack_min)(vg_tid); 486 alt_size = VG_(thread_get_altstack_size)(vg_tid); 487 return (SizeT)(a - alt_min) < alt_size; 488 } 489 490 static 491 void drd_start_using_mem_alt_stack(const Addr a, const SizeT len) 492 { 493 if (!on_alt_stack(a)) 494 drd_start_using_mem_stack(a, len); 495 } 496 497 static 498 void drd_stop_using_mem_alt_stack(const Addr a, const SizeT len) 499 { 500 if (!on_alt_stack(a)) 501 drd_stop_using_mem_stack(a, len); 502 } 503 504 /** 505 * Callback function invoked by the Valgrind core before a signal is delivered. 506 */ 507 static 508 void drd_pre_deliver_signal(const ThreadId vg_tid, const Int sigNo, 509 const Bool alt_stack) 510 { 511 DrdThreadId drd_tid; 512 513 drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid); 514 DRD_(thread_set_on_alt_stack)(drd_tid, alt_stack); 515 if (alt_stack) 516 { 517 /* 518 * As soon a signal handler has been invoked on the alternate stack, 519 * switch to stack memory handling functions that can handle the 520 * alternate stack. 521 */ 522 VG_(track_new_mem_stack)(drd_start_using_mem_alt_stack); 523 VG_(track_die_mem_stack)(drd_stop_using_mem_alt_stack); 524 } 525 } 526 527 /** 528 * Callback function invoked by the Valgrind core after a signal is delivered, 529 * at least if the signal handler did not longjmp(). 530 */ 531 static 532 void drd_post_deliver_signal(const ThreadId vg_tid, const Int sigNo) 533 { 534 DrdThreadId drd_tid; 535 536 drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid); 537 DRD_(thread_set_on_alt_stack)(drd_tid, False); 538 if (DRD_(thread_get_threads_on_alt_stack)() == 0) 539 { 540 VG_(track_new_mem_stack)(drd_start_using_mem_stack); 541 VG_(track_die_mem_stack)(drd_stop_using_mem_stack); 542 } 543 } 544 545 /** 546 * Callback function called by the Valgrind core before a stack area is 547 * being used by a signal handler. 548 * 549 * @param[in] a Start of address range. 550 * @param[in] len Address range length. 551 * @param[in] tid Valgrind thread ID for whom the signal frame is being 552 * constructed. 553 */ 554 static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len, 555 ThreadId tid) 556 { 557 DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)()); 558 drd_start_using_mem(a, len, True); 559 } 560 561 static void drd_stop_using_mem_stack_signal(Addr a, SizeT len) 562 { 563 drd_stop_using_mem(a, len, True); 564 } 565 566 static 567 void drd_pre_thread_create(const ThreadId creator, const ThreadId created) 568 { 569 const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator); 570 tl_assert(created != VG_INVALID_THREADID); 571 DRD_(thread_pre_create)(drd_creator, created); 572 if (DRD_(IsValidDrdThreadId)(drd_creator)) 573 { 574 DRD_(thread_new_segment)(drd_creator); 575 } 576 if (DRD_(thread_get_trace_fork_join)()) 577 { 578 DRD_(trace_msg)("drd_pre_thread_create creator = %d, created = %d", 579 drd_creator, created); 580 } 581 } 582 583 /* Called by Valgrind's core before any loads or stores are performed on */ 584 /* the context of thread "created". At startup, this function is called */ 585 /* with arguments (0,1). */ 586 static 587 void drd_post_thread_create(const ThreadId vg_created) 588 { 589 DrdThreadId drd_created; 590 591 tl_assert(vg_created != VG_INVALID_THREADID); 592 593 drd_created = DRD_(thread_post_create)(vg_created); 594 if (DRD_(thread_get_trace_fork_join)()) 595 { 596 DRD_(trace_msg)("drd_post_thread_create created = %d", drd_created); 597 } 598 if (! DRD_(get_check_stack_accesses)()) 599 { 600 DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created) 601 - DRD_(thread_get_stack_size)(drd_created), 602 DRD_(thread_get_stack_max)(drd_created), 603 "stack"); 604 } 605 } 606 607 /* Called after a thread has performed its last memory access. */ 608 static void drd_thread_finished(ThreadId vg_tid) 609 { 610 DrdThreadId drd_tid; 611 612 tl_assert(VG_(get_running_tid)() == vg_tid); 613 614 drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid); 615 if (DRD_(thread_get_trace_fork_join)()) 616 { 617 DRD_(trace_msg)("drd_thread_finished tid = %d%s", drd_tid, 618 DRD_(thread_get_joinable)(drd_tid) 619 ? "" : " (which is a detached thread)"); 620 } 621 if (s_show_stack_usage && !VG_(clo_xml)) { 622 const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid); 623 const SizeT used_stack 624 = (DRD_(thread_get_stack_max)(drd_tid) 625 - DRD_(thread_get_stack_min_min)(drd_tid)); 626 VG_(message)(Vg_UserMsg, 627 "thread %d%s finished and used %ld bytes out of %ld" 628 " on its stack. Margin: %ld bytes.\n", 629 drd_tid, 630 DRD_(thread_get_joinable)(drd_tid) 631 ? "" : " (which is a detached thread)", 632 used_stack, stack_size, stack_size - used_stack); 633 634 } 635 drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid), 636 DRD_(thread_get_stack_max)(drd_tid) 637 - DRD_(thread_get_stack_min)(drd_tid), 638 True); 639 DRD_(thread_set_record_loads)(drd_tid, False); 640 DRD_(thread_set_record_stores)(drd_tid, False); 641 DRD_(thread_finished)(drd_tid); 642 } 643 644 /* 645 * Called immediately after fork for the child process only. 'tid' is the 646 * only surviving thread in the child process. Cleans up thread state. 647 * See also http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html for a detailed discussion of using fork() in combination with mutexes. 648 */ 649 static 650 void drd__atfork_child(ThreadId tid) 651 { 652 DRD_(drd_thread_atfork_child)(tid); 653 } 654 655 656 // 657 // Implementation of the tool interface. 658 // 659 660 static void DRD_(post_clo_init)(void) 661 { 662 #if defined(VGO_linux) || defined(VGO_darwin) 663 /* fine */ 664 #else 665 VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n"); 666 # endif 667 668 if (s_var_info) 669 { 670 VG_(needs_var_info)(); 671 } 672 } 673 674 static void drd_start_client_code(const ThreadId tid, const ULong bbs_done) 675 { 676 tl_assert(tid == VG_(get_running_tid)()); 677 DRD_(thread_set_vg_running_tid)(tid); 678 } 679 680 static void DRD_(fini)(Int exitcode) 681 { 682 // thread_print_all(); 683 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) { 684 VG_(message)(Vg_UserMsg, "For counts of detected and suppressed errors, " 685 "rerun with: -v\n"); 686 } 687 688 if ((VG_(clo_stats) || s_print_stats) && !VG_(clo_xml)) 689 { 690 ULong pu = DRD_(thread_get_update_conflict_set_count)(); 691 ULong pu_seg_cr = DRD_(thread_get_update_conflict_set_new_sg_count)(); 692 ULong pu_mtx_cv = DRD_(thread_get_update_conflict_set_sync_count)(); 693 ULong pu_join = DRD_(thread_get_update_conflict_set_join_count)(); 694 695 VG_(message)(Vg_UserMsg, 696 " thread: %lld context switches.\n", 697 DRD_(thread_get_context_switch_count)()); 698 VG_(message)(Vg_UserMsg, 699 "confl set: %lld full updates and %lld partial updates;\n", 700 DRD_(thread_get_compute_conflict_set_count)(), 701 pu); 702 VG_(message)(Vg_UserMsg, 703 " %lld partial updates during segment creation,\n", 704 pu_seg_cr); 705 VG_(message)(Vg_UserMsg, 706 " %lld because of mutex/sema/cond.var. operations,\n", 707 pu_mtx_cv); 708 VG_(message)(Vg_UserMsg, 709 " %lld because of barrier/rwlock operations and\n", 710 pu - pu_seg_cr - pu_mtx_cv - pu_join); 711 VG_(message)(Vg_UserMsg, 712 " %lld partial updates because of thread join" 713 " operations.\n", 714 pu_join); 715 VG_(message)(Vg_UserMsg, 716 " segments: created %lld segments, max %lld alive,\n", 717 DRD_(sg_get_segments_created_count)(), 718 DRD_(sg_get_max_segments_alive_count)()); 719 VG_(message)(Vg_UserMsg, 720 " %lld discard points and %lld merges.\n", 721 DRD_(thread_get_discard_ordered_segments_count)(), 722 DRD_(sg_get_segment_merge_count)()); 723 VG_(message)(Vg_UserMsg, 724 "segmnt cr: %lld mutex, %lld rwlock, %lld semaphore and" 725 " %lld barrier.\n", 726 DRD_(get_mutex_segment_creation_count)(), 727 DRD_(get_rwlock_segment_creation_count)(), 728 DRD_(get_semaphore_segment_creation_count)(), 729 DRD_(get_barrier_segment_creation_count)()); 730 VG_(message)(Vg_UserMsg, 731 " bitmaps: %lld level one" 732 " and %lld level two bitmaps were allocated.\n", 733 DRD_(bm_get_bitmap_creation_count)(), 734 DRD_(bm_get_bitmap2_creation_count)()); 735 VG_(message)(Vg_UserMsg, 736 " mutex: %lld non-recursive lock/unlock events.\n", 737 DRD_(get_mutex_lock_count)()); 738 DRD_(print_malloc_stats)(); 739 } 740 } 741 742 static 743 void drd_pre_clo_init(void) 744 { 745 // Basic tool stuff. 746 VG_(details_name) ("drd"); 747 VG_(details_version) (NULL); 748 VG_(details_description) ("a thread error detector"); 749 VG_(details_copyright_author)("Copyright (C) 2006-2011, and GNU GPL'd," 750 " by Bart Van Assche."); 751 VG_(details_bug_reports_to) (VG_BUGS_TO); 752 753 VG_(basic_tool_funcs) (DRD_(post_clo_init), 754 DRD_(instrument), 755 DRD_(fini)); 756 757 // Command line stuff. 758 VG_(needs_command_line_options)(DRD_(process_cmd_line_option), 759 DRD_(print_usage), 760 DRD_(print_debug_usage)); 761 VG_(needs_xml_output) (); 762 763 // Error handling. 764 DRD_(register_error_handlers)(); 765 766 // Core event tracking. 767 VG_(track_pre_mem_read) (drd_pre_mem_read); 768 VG_(track_pre_mem_read_asciiz) (drd_pre_mem_read_asciiz); 769 VG_(track_post_mem_write) (drd_post_mem_write); 770 VG_(track_new_mem_brk) (drd_start_using_mem_w_tid); 771 VG_(track_new_mem_mmap) (drd_start_using_mem_w_perms); 772 VG_(track_new_mem_stack) (drd_start_using_mem_stack); 773 VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal); 774 VG_(track_new_mem_startup) (drd_start_using_mem_w_perms); 775 VG_(track_die_mem_brk) (drd_stop_using_nonstack_mem); 776 VG_(track_die_mem_munmap) (drd_stop_using_nonstack_mem); 777 VG_(track_die_mem_stack) (drd_stop_using_mem_stack); 778 VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal); 779 VG_(track_pre_deliver_signal) (drd_pre_deliver_signal); 780 VG_(track_post_deliver_signal) (drd_post_deliver_signal); 781 VG_(track_start_client_code) (drd_start_client_code); 782 VG_(track_pre_thread_ll_create) (drd_pre_thread_create); 783 VG_(track_pre_thread_first_insn)(drd_post_thread_create); 784 VG_(track_pre_thread_ll_exit) (drd_thread_finished); 785 VG_(atfork) (NULL/*pre*/, NULL/*parent*/, 786 drd__atfork_child/*child*/); 787 788 // Other stuff. 789 DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu, 790 drd_stop_using_nonstack_mem); 791 792 DRD_(clientreq_init)(); 793 794 DRD_(suppression_init)(); 795 796 DRD_(clientobj_init)(); 797 798 { 799 Char* const smi = VG_(getenv)("DRD_SEGMENT_MERGING_INTERVAL"); 800 if (smi) 801 DRD_(thread_set_segment_merge_interval)(VG_(strtoll10)(smi, NULL)); 802 } 803 } 804 805 806 VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init) 807