/* -*- mode: C; c-basic-offset: 3; indent-tabs-mode: nil; -*- */
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2011 Bart Van Assche <bvanassche (at) acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_clientobj.h"
#include "drd_error.h"
#include "drd_rwlock.h"
#include "pub_tool_vki.h"
#include "pub_tool_errormgr.h"    // VG_(maybe_record_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcprint.h"   // VG_(message)()
#include "pub_tool_libcproc.h"    // VG_(read_millisecond_timer)()
#include "pub_tool_machine.h"     // VG_(get_IP)()
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()


/* Local type definitions. */

/*
 * Per-thread bookkeeping for a single reader-writer lock: how deeply the
 * thread currently holds the lock for reading and for writing, and the
 * segments created by the thread's most recent write-unlock and read-unlock
 * calls on this lock (used to establish happens-before edges when the lock
 * is acquired again).
 */
struct rwlock_thread_info
{
   UWord    tid;                  // DrdThreadId of the thread described here.
   UInt     reader_nesting_count; // Current read-lock nesting depth.
   UInt     writer_nesting_count; // Current write-lock nesting depth.
   // Segment of last unlock call by this thread that unlocked a writer lock.
   Segment* latest_wrlocked_segment;
   // Segment of last unlock call by this thread that unlocked a reader lock.
   Segment* latest_rdlocked_segment;
};


/* Local functions.
*/ 54 55 static void rwlock_cleanup(struct rwlock_info* p); 56 static void rwlock_delete_thread(struct rwlock_info* const p, 57 const DrdThreadId tid); 58 59 60 /* Local variables. */ 61 62 static Bool DRD_(s_trace_rwlock); 63 static UInt DRD_(s_exclusive_threshold_ms); 64 static UInt DRD_(s_shared_threshold_ms); 65 static ULong DRD_(s_rwlock_segment_creation_count); 66 67 68 /* Function definitions. */ 69 70 void DRD_(rwlock_set_trace)(const Bool trace_rwlock) 71 { 72 tl_assert(trace_rwlock == False || trace_rwlock == True); 73 DRD_(s_trace_rwlock) = trace_rwlock; 74 } 75 76 void DRD_(rwlock_set_exclusive_threshold)(const UInt exclusive_threshold_ms) 77 { 78 DRD_(s_exclusive_threshold_ms) = exclusive_threshold_ms; 79 } 80 81 void DRD_(rwlock_set_shared_threshold)(const UInt shared_threshold_ms) 82 { 83 DRD_(s_shared_threshold_ms) = shared_threshold_ms; 84 } 85 86 static Bool DRD_(rwlock_is_rdlocked)(struct rwlock_info* p) 87 { 88 struct rwlock_thread_info* q; 89 90 VG_(OSetGen_ResetIter)(p->thread_info); 91 for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; ) 92 { 93 return q->reader_nesting_count > 0; 94 } 95 return False; 96 } 97 98 static Bool DRD_(rwlock_is_wrlocked)(struct rwlock_info* p) 99 { 100 struct rwlock_thread_info* q; 101 102 VG_(OSetGen_ResetIter)(p->thread_info); 103 for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; ) 104 { 105 return q->writer_nesting_count > 0; 106 } 107 return False; 108 } 109 110 static Bool DRD_(rwlock_is_locked)(struct rwlock_info* p) 111 { 112 return DRD_(rwlock_is_rdlocked)(p) || DRD_(rwlock_is_wrlocked)(p); 113 } 114 115 static Bool DRD_(rwlock_is_rdlocked_by)(struct rwlock_info* p, 116 const DrdThreadId tid) 117 { 118 const UWord uword_tid = tid; 119 struct rwlock_thread_info* q; 120 121 q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid); 122 return q && q->reader_nesting_count > 0; 123 } 124 125 static Bool DRD_(rwlock_is_wrlocked_by)(struct rwlock_info* p, 126 const DrdThreadId tid) 127 { 128 const UWord 
uword_tid = tid; 129 struct rwlock_thread_info* q; 130 131 q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid); 132 return q && q->writer_nesting_count > 0; 133 } 134 135 static Bool DRD_(rwlock_is_locked_by)(struct rwlock_info* p, 136 const DrdThreadId tid) 137 { 138 return (DRD_(rwlock_is_rdlocked_by)(p, tid) 139 || DRD_(rwlock_is_wrlocked_by)(p, tid)); 140 } 141 142 /** Either look up or insert a node corresponding to DRD thread id 'tid'. */ 143 static 144 struct rwlock_thread_info* 145 DRD_(lookup_or_insert_node)(OSet* oset, const UWord tid) 146 { 147 struct rwlock_thread_info* q; 148 149 q = VG_(OSetGen_Lookup)(oset, &tid); 150 if (q == 0) 151 { 152 q = VG_(OSetGen_AllocNode)(oset, sizeof(*q)); 153 q->tid = tid; 154 q->reader_nesting_count = 0; 155 q->writer_nesting_count = 0; 156 q->latest_wrlocked_segment = 0; 157 q->latest_rdlocked_segment = 0; 158 VG_(OSetGen_Insert)(oset, q); 159 } 160 tl_assert(q); 161 return q; 162 } 163 164 /** 165 * Combine the vector clock corresponding to the last unlock operation of 166 * reader-writer lock p into the vector clock of thread 'tid'. 
 */
static void DRD_(rwlock_combine_other_vc)(struct rwlock_info* const p,
                                          const DrdThreadId tid,
                                          const Bool readers_too)
{
   struct rwlock_thread_info* q;
   VectorClock old_vc;

   /* Remember the vector clock as it was before the combine operations so
      that the conflict set can be updated incrementally afterwards. */
   DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
   VG_(OSetGen_ResetIter)(p->thread_info);
   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
   {
      /* Skip the acquiring thread itself. */
      if (q->tid != tid)
      {
         /* Always combine with the other thread's last write-unlock
            segment. */
         if (q->latest_wrlocked_segment)
         {
            DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc,
                             &q->latest_wrlocked_segment->vc);
         }
         /* Combine with the other thread's last read-unlock segment only
            when requested (callers pass readers_too == True when a write
            lock is being acquired). */
         if (readers_too && q->latest_rdlocked_segment)
         {
            DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc,
                             &q->latest_rdlocked_segment->vc);
         }
      }
   }
   DRD_(thread_update_conflict_set)(tid, &old_vc);
   DRD_(vc_cleanup)(&old_vc);
}

/**
 * Compare the type of the rwlock specified at initialization time with
 * the type passed as an argument, and complain if these two types do not
 * match.
 *
 * @return True if the types match, False if a mismatch was reported.
 */
static Bool drd_rwlock_check_type(struct rwlock_info* const p,
                                  const RwLockT rwlock_type)
{
   tl_assert(p);
   /* The code below has to be updated if additional rwlock types are added. */
   tl_assert(rwlock_type == pthread_rwlock || rwlock_type == user_rwlock);
   tl_assert(p->rwlock_type == pthread_rwlock || p->rwlock_type == user_rwlock);

   if (p->rwlock_type == rwlock_type)
      return True;

   {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)
         (VG_(get_running_tid)(),
          RwlockErr,
          VG_(get_IP)(VG_(get_running_tid)()),
          rwlock_type == pthread_rwlock
          ? "Attempt to use a user-defined rwlock as a POSIX rwlock"
          : "Attempt to use a POSIX rwlock as a user-defined rwlock",
          &REI);
   }
   return False;
}

/** Initialize the rwlock_info data structure *p.
*/ 228 static 229 void DRD_(rwlock_initialize)(struct rwlock_info* const p, const Addr rwlock, 230 const RwLockT rwlock_type) 231 { 232 tl_assert(rwlock != 0); 233 tl_assert(p->a1 == rwlock); 234 tl_assert(p->type == ClientRwlock); 235 236 p->cleanup = (void(*)(DrdClientobj*))rwlock_cleanup; 237 p->delete_thread 238 = (void(*)(DrdClientobj*, DrdThreadId))rwlock_delete_thread; 239 p->rwlock_type = rwlock_type; 240 p->thread_info = VG_(OSetGen_Create)( 241 0, 0, VG_(malloc), "drd.rwlock.ri.1", VG_(free)); 242 p->acquiry_time_ms = 0; 243 p->acquired_at = 0; 244 } 245 246 /** Deallocate the memory that was allocated by rwlock_initialize(). */ 247 static void rwlock_cleanup(struct rwlock_info* p) 248 { 249 struct rwlock_thread_info* q; 250 251 tl_assert(p); 252 253 if (DRD_(s_trace_rwlock)) 254 DRD_(trace_msg)("[%d] rwlock_destroy 0x%lx", 255 DRD_(thread_get_running_tid)(), p->a1); 256 257 if (DRD_(rwlock_is_locked)(p)) 258 { 259 RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 }; 260 VG_(maybe_record_error)(VG_(get_running_tid)(), 261 RwlockErr, 262 VG_(get_IP)(VG_(get_running_tid)()), 263 "Destroying locked rwlock", 264 &REI); 265 } 266 267 VG_(OSetGen_ResetIter)(p->thread_info); 268 for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; ) 269 { 270 DRD_(sg_put)(q->latest_wrlocked_segment); 271 DRD_(sg_put)(q->latest_rdlocked_segment); 272 } 273 274 VG_(OSetGen_Destroy)(p->thread_info); 275 } 276 277 static 278 struct rwlock_info* 279 DRD_(rwlock_get_or_allocate)(const Addr rwlock, const RwLockT rwlock_type) 280 { 281 struct rwlock_info* p; 282 283 tl_assert(offsetof(DrdClientobj, rwlock) == 0); 284 p = &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock); 285 if (p) 286 { 287 drd_rwlock_check_type(p, rwlock_type); 288 return p; 289 } 290 291 if (DRD_(clientobj_present)(rwlock, rwlock + 1)) 292 { 293 GenericErrInfo GEI = { 294 .tid = DRD_(thread_get_running_tid)(), 295 .addr = rwlock, 296 }; 297 VG_(maybe_record_error)(VG_(get_running_tid)(), 298 GenericErr, 
299 VG_(get_IP)(VG_(get_running_tid)()), 300 "Not a reader-writer lock", 301 &GEI); 302 return 0; 303 } 304 305 p = &(DRD_(clientobj_add)(rwlock, ClientRwlock)->rwlock); 306 DRD_(rwlock_initialize)(p, rwlock, rwlock_type); 307 return p; 308 } 309 310 static struct rwlock_info* DRD_(rwlock_get)(const Addr rwlock) 311 { 312 tl_assert(offsetof(DrdClientobj, rwlock) == 0); 313 return &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock); 314 } 315 316 /** Called before pthread_rwlock_init(). */ 317 struct rwlock_info* DRD_(rwlock_pre_init)(const Addr rwlock, 318 const RwLockT rwlock_type) 319 { 320 struct rwlock_info* p; 321 322 if (DRD_(s_trace_rwlock)) 323 DRD_(trace_msg)("[%d] rwlock_init 0x%lx", 324 DRD_(thread_get_running_tid)(), rwlock); 325 326 p = DRD_(rwlock_get)(rwlock); 327 328 if (p) 329 drd_rwlock_check_type(p, rwlock_type); 330 331 if (p) 332 { 333 const ThreadId vg_tid = VG_(get_running_tid)(); 334 RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 }; 335 VG_(maybe_record_error)(vg_tid, 336 RwlockErr, 337 VG_(get_IP)(vg_tid), 338 "Reader-writer lock reinitialization", 339 &REI); 340 return p; 341 } 342 343 p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type); 344 345 return p; 346 } 347 348 /** Called after pthread_rwlock_destroy(). */ 349 void DRD_(rwlock_post_destroy)(const Addr rwlock, const RwLockT rwlock_type) 350 { 351 struct rwlock_info* p; 352 353 p = DRD_(rwlock_get)(rwlock); 354 if (p == 0) 355 { 356 GenericErrInfo GEI = { 357 .tid = DRD_(thread_get_running_tid)(), 358 .addr = rwlock, 359 }; 360 VG_(maybe_record_error)(VG_(get_running_tid)(), 361 GenericErr, 362 VG_(get_IP)(VG_(get_running_tid)()), 363 "Not a reader-writer lock", 364 &GEI); 365 return; 366 } 367 368 drd_rwlock_check_type(p, rwlock_type); 369 370 DRD_(clientobj_remove)(rwlock, ClientRwlock); 371 } 372 373 /** 374 * Called before pthread_rwlock_rdlock() is invoked. If a data structure for 375 * the client-side object was not yet created, do this now. 
Also check whether 376 * an attempt is made to lock recursively a synchronization object that must 377 * not be locked recursively. 378 */ 379 void DRD_(rwlock_pre_rdlock)(const Addr rwlock, const RwLockT rwlock_type) 380 { 381 struct rwlock_info* p; 382 383 if (DRD_(s_trace_rwlock)) 384 DRD_(trace_msg)("[%d] pre_rwlock_rdlock 0x%lx", 385 DRD_(thread_get_running_tid)(), rwlock); 386 387 p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type); 388 tl_assert(p); 389 390 if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)())) { 391 RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 }; 392 VG_(maybe_record_error)(VG_(get_running_tid)(), 393 RwlockErr, 394 VG_(get_IP)(VG_(get_running_tid)()), 395 "Already locked for writing by calling thread", 396 &REI); 397 } 398 } 399 400 /** 401 * Update rwlock_info state when locking the pthread_rwlock_t mutex. 402 * Note: this function must be called after pthread_rwlock_rdlock() has been 403 * called, or a race condition is triggered ! 404 */ 405 void DRD_(rwlock_post_rdlock)(const Addr rwlock, const RwLockT rwlock_type, 406 const Bool took_lock) 407 { 408 const DrdThreadId drd_tid = DRD_(thread_get_running_tid)(); 409 struct rwlock_info* p; 410 struct rwlock_thread_info* q; 411 412 if (DRD_(s_trace_rwlock)) 413 DRD_(trace_msg)("[%d] post_rwlock_rdlock 0x%lx", drd_tid, rwlock); 414 415 p = DRD_(rwlock_get)(rwlock); 416 417 if (! p || ! took_lock) 418 return; 419 420 tl_assert(! DRD_(rwlock_is_wrlocked)(p)); 421 422 q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid); 423 if (++q->reader_nesting_count == 1) 424 { 425 DRD_(thread_new_segment)(drd_tid); 426 DRD_(s_rwlock_segment_creation_count)++; 427 DRD_(rwlock_combine_other_vc)(p, drd_tid, False); 428 429 p->acquiry_time_ms = VG_(read_millisecond_timer)(); 430 p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0); 431 } 432 } 433 434 /** 435 * Called before pthread_rwlock_wrlock() is invoked. 
If a data structure for 436 * the client-side object was not yet created, do this now. Also check whether 437 * an attempt is made to lock recursively a synchronization object that must 438 * not be locked recursively. 439 */ 440 void DRD_(rwlock_pre_wrlock)(const Addr rwlock, const RwLockT rwlock_type) 441 { 442 struct rwlock_info* p; 443 444 p = DRD_(rwlock_get)(rwlock); 445 446 if (DRD_(s_trace_rwlock)) 447 DRD_(trace_msg)("[%d] pre_rwlock_wrlock 0x%lx", 448 DRD_(thread_get_running_tid)(), rwlock); 449 450 if (p == 0) 451 p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type); 452 453 tl_assert(p); 454 455 if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)())) 456 { 457 RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 }; 458 VG_(maybe_record_error)(VG_(get_running_tid)(), 459 RwlockErr, 460 VG_(get_IP)(VG_(get_running_tid)()), 461 "Recursive writer locking not allowed", 462 &REI); 463 } 464 } 465 466 /** 467 * Update rwlock_info state when locking the pthread_rwlock_t rwlock. 468 * Note: this function must be called after pthread_rwlock_wrlock() has 469 * finished, or a race condition is triggered ! 470 */ 471 void DRD_(rwlock_post_wrlock)(const Addr rwlock, const RwLockT rwlock_type, 472 const Bool took_lock) 473 { 474 const DrdThreadId drd_tid = DRD_(thread_get_running_tid)(); 475 struct rwlock_info* p; 476 struct rwlock_thread_info* q; 477 478 p = DRD_(rwlock_get)(rwlock); 479 480 if (DRD_(s_trace_rwlock)) 481 DRD_(trace_msg)("[%d] post_rwlock_wrlock 0x%lx", drd_tid, rwlock); 482 483 if (! p || ! 
took_lock) 484 return; 485 486 q = DRD_(lookup_or_insert_node)(p->thread_info, 487 DRD_(thread_get_running_tid)()); 488 tl_assert(q->writer_nesting_count == 0); 489 q->writer_nesting_count++; 490 tl_assert(q->writer_nesting_count == 1); 491 DRD_(thread_new_segment)(drd_tid); 492 DRD_(s_rwlock_segment_creation_count)++; 493 DRD_(rwlock_combine_other_vc)(p, drd_tid, True); 494 p->acquiry_time_ms = VG_(read_millisecond_timer)(); 495 p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0); 496 } 497 498 /** 499 * Update rwlock_info state when unlocking the pthread_rwlock_t rwlock. 500 * 501 * @param rwlock Pointer to pthread_rwlock_t data structure in the client space. 502 * 503 * @return New value of the rwlock recursion count. 504 * 505 * @note This function must be called before pthread_rwlock_unlock() is called, 506 * or a race condition is triggered ! 507 */ 508 void DRD_(rwlock_pre_unlock)(const Addr rwlock, const RwLockT rwlock_type) 509 { 510 const DrdThreadId drd_tid = DRD_(thread_get_running_tid)(); 511 const ThreadId vg_tid = VG_(get_running_tid)(); 512 struct rwlock_info* p; 513 struct rwlock_thread_info* q; 514 515 if (DRD_(s_trace_rwlock)) 516 DRD_(trace_msg)("[%d] rwlock_unlock 0x%lx", drd_tid, rwlock); 517 518 p = DRD_(rwlock_get)(rwlock); 519 if (p == 0) 520 { 521 GenericErrInfo GEI = { 522 .tid = DRD_(thread_get_running_tid)(), 523 .addr = rwlock, 524 }; 525 VG_(maybe_record_error)(VG_(get_running_tid)(), 526 GenericErr, 527 VG_(get_IP)(VG_(get_running_tid)()), 528 "Not a reader-writer lock", 529 &GEI); 530 return; 531 } 532 533 drd_rwlock_check_type(p, rwlock_type); 534 535 if (! 
DRD_(rwlock_is_locked_by)(p, drd_tid)) 536 { 537 RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 }; 538 VG_(maybe_record_error)(vg_tid, 539 RwlockErr, 540 VG_(get_IP)(vg_tid), 541 "Reader-writer lock not locked by calling thread", 542 &REI); 543 return; 544 } 545 q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid); 546 tl_assert(q); 547 if (q->reader_nesting_count > 0) 548 { 549 q->reader_nesting_count--; 550 if (q->reader_nesting_count == 0 && DRD_(s_shared_threshold_ms) > 0) 551 { 552 Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms; 553 if (held > DRD_(s_shared_threshold_ms)) 554 { 555 HoldtimeErrInfo HEI 556 = { DRD_(thread_get_running_tid)(), 557 rwlock, p->acquired_at, held, DRD_(s_shared_threshold_ms) }; 558 VG_(maybe_record_error)(vg_tid, 559 HoldtimeErr, 560 VG_(get_IP)(vg_tid), 561 "rwlock", 562 &HEI); 563 } 564 } 565 if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0) 566 { 567 /* 568 * This pthread_rwlock_unlock() call really unlocks the rwlock. Save 569 * the current vector clock of the thread such that it is available 570 * when this rwlock is locked again. 571 */ 572 DRD_(thread_get_latest_segment)(&q->latest_rdlocked_segment, drd_tid); 573 DRD_(thread_new_segment)(drd_tid); 574 DRD_(s_rwlock_segment_creation_count)++; 575 } 576 } 577 else if (q->writer_nesting_count > 0) 578 { 579 q->writer_nesting_count--; 580 if (q->writer_nesting_count == 0 && DRD_(s_exclusive_threshold_ms) > 0) 581 { 582 Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms; 583 if (held > DRD_(s_exclusive_threshold_ms)) 584 { 585 HoldtimeErrInfo HEI 586 = { DRD_(thread_get_running_tid)(), 587 rwlock, p->acquired_at, held, 588 DRD_(s_exclusive_threshold_ms) }; 589 VG_(maybe_record_error)(vg_tid, 590 HoldtimeErr, 591 VG_(get_IP)(vg_tid), 592 "rwlock", 593 &HEI); 594 } 595 } 596 if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0) 597 { 598 /* 599 * This pthread_rwlock_unlock() call really unlocks the rwlock. 
Save 600 * the current vector clock of the thread such that it is available 601 * when this rwlock is locked again. 602 */ 603 DRD_(thread_get_latest_segment)(&q->latest_wrlocked_segment, drd_tid); 604 DRD_(thread_new_segment)(drd_tid); 605 DRD_(s_rwlock_segment_creation_count)++; 606 } 607 } 608 else 609 { 610 tl_assert(False); 611 } 612 } 613 614 /** Called when thread tid stops to exist. */ 615 static void rwlock_delete_thread(struct rwlock_info* const p, 616 const DrdThreadId tid) 617 { 618 struct rwlock_thread_info* q; 619 620 if (DRD_(rwlock_is_locked_by)(p, tid)) 621 { 622 RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 }; 623 VG_(maybe_record_error)(VG_(get_running_tid)(), 624 RwlockErr, 625 VG_(get_IP)(VG_(get_running_tid)()), 626 "Reader-writer lock still locked at thread exit", 627 &REI); 628 q = DRD_(lookup_or_insert_node)(p->thread_info, tid); 629 q->reader_nesting_count = 0; 630 q->writer_nesting_count = 0; 631 } 632 } 633 634 ULong DRD_(get_rwlock_segment_creation_count)(void) 635 { 636 return DRD_(s_rwlock_segment_creation_count); 637 } 638