
Lines Matching defs:thr (matches restricted to the helgrind directory)

164 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
168 static Thread* mk_Thread ( Thr* hbthr ) {
236 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
239 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
242 tl_assert(HG_(is_sane_Thread)(thr));
252 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
267 VG_(addToBag)( lk->heldBy, (UWord)thr );
276 /* assert: .. and that thread is 'thr'. */
277 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
279 VG_(addToBag)(lk->heldBy, (UWord)thr);
290 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
293 tl_assert(HG_(is_sane_Thread)(thr));
308 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
317 VG_(addToBag)(lk->heldBy, (UWord)thr);
321 VG_(addToBag)( lk->heldBy, (UWord)thr );
327 /* Update 'lk' to reflect a release of it by 'thr'. This is done
331 static void lockN_release ( Lock* lk, Thread* thr )
335 tl_assert(HG_(is_sane_Thread)(thr));
340 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
341 /* thr must actually have been a holder of lk */
356 Thread* thr;
363 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
364 tl_assert(HG_(is_sane_Thread)(thr));
366 thr->locksetA, (UWord)lk ));
367 thr->locksetA
368 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
372 thr->locksetW, (UWord)lk ));
373 thr->locksetW
374 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
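
The lockN_* helpers matched above keep two structures in step: the lock's heldBy bag of holder threads, and each holder's locksetA/locksetW word-sets. A minimal stand-alone sketch of that bookkeeping, using a hold-count array in place of VG_(addToBag)/VG_(delFromBag) and bitmasks in place of the univ_lsets word-sets (all toy names are illustrative, not Helgrind's API):

    #include <assert.h>
    #include <stdbool.h>

    #define MAX_THREADS 8

    typedef struct {                    /* toy stand-in for Lock               */
        int  held_count[MAX_THREADS];   /* lk->heldBy: per-thread hold count   */
        bool heldW;                     /* held in write mode?                 */
    } ToyLock;

    typedef struct {                    /* toy stand-in for Thread             */
        unsigned locksetA;              /* all locks held (bitmask)            */
        unsigned locksetW;              /* write-held locks only               */
    } ToyThread;

    /* cf. lockN_acquire_writer: record one (possibly recursive) write hold. */
    static void toy_acquire_writer(ToyLock* lk, ToyThread* thr, int tix, int lkix)
    {
        lk->held_count[tix]++;
        lk->heldW = true;
        thr->locksetA |= 1u << lkix;
        thr->locksetW |= 1u << lkix;
    }

    /* cf. lockN_release plus the lockset-cleanup loop: drop one hold and,
       once the last hold is gone, remove the lock from both locksets. */
    static void toy_release(ToyLock* lk, ToyThread* thr, int tix, int lkix)
    {
        assert(lk->held_count[tix] > 0);     /* thr must actually hold lk */
        if (--lk->held_count[tix] == 0) {
            thr->locksetA &= ~(1u << lkix);
            thr->locksetW &= ~(1u << lkix);
        }
    }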
497 Thread* thr;
501 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
503 VG_(printf)("%lu:%p ", count, thr);
507 count, thr->errmsg_index);
508 if (thr->coretid == VG_INVALID_THREADID)
511 VG_(printf)("tid %d ", thr->coretid);
587 static void initialise_data_structures ( Thr* hbthr_root )
589 Thread* thr;
627 thr = mk_Thread(hbthr_root);
628 thr->coretid = 1; /* FIXME: hardwires an assumption about the
631 libhb_set_Thr_hgthread(hbthr_root, thr);
634 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
635 tl_assert(thr->coretid != VG_INVALID_THREADID);
637 map_threads[thr->coretid] = thr;
652 Thread* thr;
654 thr = map_threads[coretid];
655 return thr;
661 Thread* thr;
663 thr = map_threads[coretid];
664 tl_assert(thr);
665 return thr;
668 /* Do a reverse lookup. Does not assert if 'thr' is not found in
670 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
673 tl_assert(HG_(is_sane_Thread)(thr));
677 tid = thr->coretid;
682 /* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
684 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
686 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
695 Thread* thr;
698 thr = map_threads[coretid];
699 tl_assert(thr);
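
The map_threads_* matches above suggest a simple mapping: a fixed array indexed by core ThreadId for the forward direction, and a coretid field stored in each Thread for the reverse direction. A stand-alone sketch of that pattern (the array bound and helper names are illustrative):

    #include <assert.h>
    #include <stddef.h>

    #define VG_N_THREADS        64      /* illustrative bound */
    #define VG_INVALID_THREADID 0

    typedef unsigned ThreadId;
    typedef struct Thread { ThreadId coretid; } Thread;

    static Thread* map_threads[VG_N_THREADS];   /* ThreadId -> Thread*, NULL if unused */

    /* Forward lookup that may fail (cf. map_threads_maybe_lookup). */
    static Thread* maybe_lookup(ThreadId tid) {
        assert(tid < VG_N_THREADS);
        return map_threads[tid];
    }

    /* Forward lookup that must succeed (cf. map_threads_lookup). */
    static Thread* lookup(ThreadId tid) {
        Thread* thr = maybe_lookup(tid);
        assert(thr != NULL);
        return thr;
    }

    /* Reverse lookup: a Thread carries its own coretid, which becomes
       VG_INVALID_THREADID once the thread is gone. */
    static ThreadId maybe_reverse_lookup(Thread* thr) {
        assert(thr != NULL);
        return thr->coretid;
    }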
779 // and the associated Segment has .thr == t
800 and seg->thr->lockset does not contain lk
815 seg->thr is a sane Thread
830 each thr in tset is a valid thread, which is non-dead
836 /* Return True iff 'thr' holds 'lk' in some mode. */
837 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
840 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
851 Thread* thr;
856 for (thr = admin_threads; thr; thr = thr->admin) {
857 if (!HG_(is_sane_Thread)(thr)) BAD("1");
858 wsA = thr->locksetA;
859 wsW = thr->locksetW;
869 if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
911 Thread* thr;
915 (UWord*)&thr, &count )) {
918 tl_assert(HG_(is_sane_Thread)(thr));
919 if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
923 && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
926 && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
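
The sanity-check matches above cross-check the two structures against each other: every lock in a thread's lockset must name that thread as a holder, and every holder in a lock's heldBy bag must have the lock in locksetA (and in locksetW only when the lock is write-held). A simplified checker over the same toy representation as the first sketch (illustrative names):

    #include <stdbool.h>

    #define MAX_THREADS 8
    #define MAX_LOCKS   16

    typedef struct { int held_count[MAX_THREADS]; bool heldW; } ToyLock;
    typedef struct { unsigned locksetA, locksetW; }             ToyThread;

    /* Returns false at the first violated invariant (cf. the BAD("..") exits). */
    static bool toy_sanity(const ToyThread* thrs, int nthr,
                           const ToyLock*   lks,  int nlk)
    {
        for (int t = 0; t < nthr; t++) {
            for (int l = 0; l < nlk; l++) {
                bool inA  = (thrs[t].locksetA >> l) & 1;
                bool inW  = (thrs[t].locksetW >> l) & 1;
                bool held = lks[l].held_count[t] > 0;
                if (inA != held)                    return false; /* lockset vs heldBy disagree */
                if (inW && !(held && lks[l].heldW)) return false; /* in W-set but not write-held */
            }
        }
        return true;
    }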
973 static void shadow_mem_scopy_range ( Thread* thr,
976 Thr* hbthr = thr->hbthr;
981 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
983 Thr* hbthr = thr->hbthr;
988 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
989 Thr* hbthr = thr->hbthr;
994 static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
996 libhb_srange_new( thr->hbthr, a, len );
999 static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
1004 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1007 static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
1012 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
1015 static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1019 libhb_srange_untrack( thr->hbthr, aIN, len );
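
The shadow_mem_* matches are thin wrappers: the tool layer works with Thread*, the happens-before engine with Thr*, and each wrapper just unwraps thr->hbthr and forwards the address range. A sketch of the pattern with hypothetical engine hooks standing in for the libhb_srange_* calls:

    typedef unsigned long Addr;
    typedef unsigned long SizeT;
    typedef struct Thr Thr;                        /* engine-side thread (opaque) */
    typedef struct Thread { Thr* hbthr; } Thread;  /* tool-side thread            */

    /* Hypothetical stand-ins for libhb_srange_new / _noaccess / _untrack. */
    static void engine_range_new     (Thr* t, Addr a, SizeT len) { (void)t; (void)a; (void)len; }
    static void engine_range_noaccess(Thr* t, Addr a, SizeT len) { (void)t; (void)a; (void)len; }

    /* Each wrapper: unwrap the engine handle, forward the range. */
    static void shadow_mem_new(Thread* thr, Addr a, SizeT len) {
        engine_range_new(thr->hbthr, a, len);
    }
    static void shadow_mem_noaccess(Thread* thr, Addr a, SizeT len) {
        engine_range_noaccess(thr->hbthr, a, len);
    }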
1030 /* Create a new segment for 'thr', making it depend (.prev) on its
1032 return both of them. Also update 'thr' so it references the new
1037 //zz Thread* thr )
1042 //zz tl_assert(HG_(is_sane_Thread)(thr));
1043 //zz cur_seg = map_segments_lookup( thr->csegid );
1045 //zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1047 //zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1050 //zz thr->csegid = *new_segidP;
1057 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1073 tl_assert(HG_(is_sane_Thread)(thr));
1077 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1081 tl_assert(thr->hbthr);
1087 lockN_acquire_writer( lk, thr );
1089 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1098 thr, "Bug in libpthread: write lock "
1107 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1109 thr, "Bug in libpthread: write lock "
1115 /* So the lock is already held in w-mode by 'thr'. That means this
1122 thr, "Bug in libpthread: recursive write lock "
1129 lockN_acquire_writer( lk, thr );
1132 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1139 laog__pre_thread_acquires_lock( thr, lk );
1142 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1143 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
1154 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1170 tl_assert(HG_(is_sane_Thread)(thr));
1176 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1180 tl_assert(thr->hbthr);
1186 lockN_acquire_reader( lk, thr );
1188 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1196 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1204 lockN_acquire_reader( lk, thr );
1207 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1214 laog__pre_thread_acquires_lock( thr, lk );
1217 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1218 /* but don't update thr->locksetW, since lk is only rd-held */
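
Both acquisition handlers matched above share one shape: diagnose misuse (bogus lock, recursive write-lock on a non-recursive mutex), record the hold via lockN_acquire_*, import the lock's happens-before state with libhb_so_recv (strong for a write acquisition, weak for a read), run the lock-order check, and extend the locksets: locksetA always, locksetW only in write mode. A hedged outline with stand-in helpers (not Helgrind's real signatures):

    #include <stdbool.h>

    typedef struct Lock Lock;
    typedef struct Thr  Thr;
    typedef struct Thread { unsigned locksetA, locksetW; Thr* hbthr; } Thread;

    /* Stand-ins for lockN_acquire_{writer,reader}, libhb_so_recv and
       laog__pre_thread_acquires_lock. */
    static void bag_add_holder(Lock* lk, Thread* thr, bool w)         { (void)lk; (void)thr; (void)w; }
    static void hb_recv_from_lock(Thread* thr, Lock* lk, bool strong) { (void)thr; (void)lk; (void)strong; }
    static void lock_order_check(Thread* thr, Lock* lk)               { (void)thr; (void)lk; }

    /* Common tail of evhH__post_thread_{w,r}_acquires_lock; the misuse
       error paths run before this point in the real handlers. */
    static void post_acquire(Thread* thr, Lock* lk, int lk_ix, bool write_mode)
    {
        bag_add_holder(lk, thr, write_mode);      /* lockN_acquire_{writer,reader} */
        hb_recv_from_lock(thr, lk, write_mode);   /* strong recv iff write mode    */
        lock_order_check(thr, lk);                /* laog check, before locksets   */
        thr->locksetA |= 1u << lk_ix;             /* always                        */
        if (write_mode)
            thr->locksetW |= 1u << lk_ix;         /* rd-held: locksetW untouched   */
    }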
1229 void evhH__pre_thread_releases_lock ( Thread* thr,
1245 tl_assert(HG_(is_sane_Thread)(thr));
1252 HG_(record_error_UnlockBogus)( thr, lock_ga );
1260 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1264 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1272 HG_(record_error_UnlockUnlocked)( thr, lock );
1273 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1274 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1284 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
1293 tl_assert(realOwner != thr);
1294 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1295 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1296 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1303 lockN_release( lock, thr );
1310 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1315 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1317 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1319 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1345 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1348 thr->locksetA
1349 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
1350 thr->locksetW
1351 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
1353 tl_assert(thr->hbthr);
1357 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
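
The release handler mirrors that: misuse is reported (UnlockBogus, UnlockUnlocked, UnlockForeign) without touching any state; otherwise one hold is dropped with lockN_release, the lock leaves both locksets once the last recursive hold is gone, and the releaser's happens-before state is published into the lock's SO with libhb_so_send so the next acquirer can receive it. A hedged outline with stand-in helpers:

    #include <stdbool.h>

    typedef struct Lock Lock;
    typedef struct Thr  Thr;
    typedef struct Thread { unsigned locksetA, locksetW; Thr* hbthr; } Thread;

    /* Stand-ins for VG_(elemBag)/lockN_release, libhb_so_send and the
       HG_(record_error_Unlock*) reporters. */
    static bool holds(Lock* lk, Thread* thr)              { (void)lk; (void)thr; return true; }
    static int  drop_one_hold(Lock* lk, Thread* thr)      { (void)lk; (void)thr; return 0; } /* holds left */
    static void hb_send_to_lock(Thread* thr, Lock* lk, bool was_heldW) { (void)thr; (void)lk; (void)was_heldW; }
    static void report_bad_unlock(Thread* thr, Lock* lk)  { (void)thr; (void)lk; }

    static void pre_release(Thread* thr, Lock* lk, int lk_ix, bool was_heldW)
    {
        if (!holds(lk, thr)) {                    /* unlocked, or held by someone else */
            report_bad_unlock(thr, lk);           /* UnlockUnlocked / UnlockForeign    */
            return;
        }
        if (drop_one_hold(lk, thr) == 0) {        /* lockN_release; last hold gone     */
            thr->locksetA &= ~(1u << lk_ix);
            thr->locksetW &= ~(1u << lk_ix);
        }
        hb_send_to_lock(thr, lk, was_heldW);      /* libhb_so_send                     */
    }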
1410 Thread* thr;
1411 thr = get_current_Thread_in_C_C();
1412 if (LIKELY(thr))
1413 return thr;
1422 thr = map_threads_lookup( coretid );
1423 return thr;
1538 Thr* hbthr_p;
1539 Thr* hbthr_c;
1597 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1652 Thread* thr;
1654 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1655 tl_assert(!thr);
1660 thr = map_threads_maybe_lookup(i);
1661 if (!thr)
1665 tl_assert(thr->hbthr);
1666 libhb_async_exit(thr->hbthr);
1667 tl_assert(thr->coretid == i);
1668 thr->coretid = VG_INVALID_THREADID;
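
The loop matched above is a sweep over every core ThreadId: each still-mapped Thread has its engine-side Thr marked as asynchronously exited and its coretid invalidated, so later reverse lookups report VG_INVALID_THREADID. A stand-alone sketch of the sweep (bound and helper names illustrative):

    #include <assert.h>
    #include <stddef.h>

    #define VG_N_THREADS        64   /* illustrative bound */
    #define VG_INVALID_THREADID 0

    typedef unsigned ThreadId;
    typedef struct Thr Thr;
    typedef struct Thread { ThreadId coretid; Thr* hbthr; } Thread;

    static Thread* map_threads[VG_N_THREADS];
    static void hb_async_exit(Thr* t) { (void)t; }     /* stand-in for libhb_async_exit */

    static void sweep_async_exit(void)
    {
        assert(map_threads[0] == NULL);                /* slot 0 (INVALID) stays empty */
        for (ThreadId i = 1; i < VG_N_THREADS; i++) {
            Thread* thr = map_threads[i];
            if (!thr)
                continue;
            assert(thr->hbthr != NULL);
            hb_async_exit(thr->hbthr);                 /* tell the engine this thread is gone */
            assert(thr->coretid == i);
            thr->coretid = VG_INVALID_THREADID;        /* reverse lookup now says "gone"      */
        }
    }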
1679 Thr* hbthr_s;
1680 Thr* hbthr_q;
1800 Thread* thr;
1803 thr = get_current_Thread();
1804 tl_assert(thr);
1811 shadow_mem_cwrite_range(thr, a, len);
1813 shadow_mem_make_NoAccess_NoFX( thr, a, len );
1822 Thread* thr = get_current_Thread_in_C_C();
1823 Thr* hbthr = thr->hbthr;
1829 Thread* thr = get_current_Thread_in_C_C();
1830 Thr* hbthr = thr->hbthr;
1836 Thread* thr = get_current_Thread_in_C_C();
1837 Thr* hbthr = thr->hbthr;
1843 Thread* thr = get_current_Thread_in_C_C();
1844 Thr* hbthr = thr->hbthr;
1850 Thread* thr = get_current_Thread_in_C_C();
1851 Thr* hbthr = thr->hbthr;
1857 Thread* thr = get_current_Thread_in_C_C();
1858 Thr* hbthr = thr->hbthr;
1864 Thread* thr = get_current_Thread_in_C_C();
1865 Thr* hbthr = thr->hbthr;
1871 Thread* thr = get_current_Thread_in_C_C();
1872 Thr* hbthr = thr->hbthr;
1878 Thread* thr = get_current_Thread_in_C_C();
1879 Thr* hbthr = thr->hbthr;
1885 Thread* thr = get_current_Thread_in_C_C();
1886 Thr* hbthr = thr->hbthr;
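
The run of identical two-line matches above is the prologue shared by every instrumented memory-access callback: fetch the current Thread (cached on the C side, hence get_current_Thread_in_C_C), unwrap its Thr*, and hand the access to the engine. A sketch with hypothetical engine hooks (the real calls are the libhb read/write entry points, which are not visible in these matches):

    typedef unsigned long Addr;
    typedef struct Thr Thr;
    typedef struct Thread { Thr* hbthr; } Thread;

    static Thread  the_current;                        /* stand-in for the cached lookup */
    static Thread* current_thread(void) { return &the_current; }

    /* Hypothetical engine hooks for fixed-size accesses. */
    static void engine_read (Thr* t, Addr a, unsigned szB) { (void)t; (void)a; (void)szB; }
    static void engine_write(Thr* t, Addr a, unsigned szB) { (void)t; (void)a; (void)szB; }

    /* Same prologue in every callback: Thread -> Thr, then forward. */
    static void on_mem_read_4 (Addr a) { Thread* thr = current_thread(); engine_read (thr->hbthr, a, 4); }
    static void on_mem_write_8(Addr a) { Thread* thr = current_thread(); engine_write(thr->hbthr, a, 8); }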
1917 Thread* thr;
1924 thr = map_threads_maybe_lookup( tid );
1926 tl_assert( HG_(is_sane_Thread)(thr) );
1940 thr, "pthread_mutex_destroy with invalid argument" );
1949 thr, "pthread_mutex_destroy of a locked mutex" );
1976 Thread* thr;
1983 thr = map_threads_maybe_lookup( tid );
1984 tl_assert(thr); /* cannot fail - Thread* must already exist */
1989 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1998 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2007 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2009 HG_(record_error_Misc)( thr, errstr );
2017 Thread* thr;
2022 thr = map_threads_maybe_lookup( tid );
2023 tl_assert(thr); /* cannot fail - Thread* must already exist */
2026 thr,
2035 Thread* thr;
2040 thr = map_threads_maybe_lookup( tid );
2041 tl_assert(thr); /* cannot fail - Thread* must already exist */
2043 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2049 Thread* thr;
2053 thr = map_threads_maybe_lookup( tid );
2054 tl_assert(thr); /* cannot fail - Thread* must already exist */
2070 Thread* thr;
2081 thr = map_threads_maybe_lookup( tid );
2083 tl_assert( HG_(is_sane_Thread)(thr) );
2090 evhH__pre_thread_releases_lock( thr, (Addr)slock,
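
The pthread_mutex_* matches above all follow the same intercept shape: map the core tid onto the tool's Thread (which must already exist), report obvious misuse with HG_(record_error_Misc), and delegate the real state change to an evhH__ helper. A hedged sketch of one such handler; the NULL-argument check and the helper names are illustrative, not lifted from the real handlers:

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned      ThreadId;
    typedef unsigned long Addr;
    typedef struct Thread { int unused_; } Thread;

    static Thread  a_thread;
    static Thread* map_threads_maybe_lookup(ThreadId tid)        { (void)tid; return &a_thread; }
    static void    record_misc_error(Thread* thr, const char* m) { (void)thr; (void)m; }
    static void    pre_release(Thread* thr, Addr ga, int isRDWR) { (void)thr; (void)ga; (void)isRDWR; }

    static void on_mutex_unlock_pre(ThreadId tid, void* mutex)
    {
        Thread* thr = map_threads_maybe_lookup(tid);
        assert(thr != NULL);                       /* cannot fail: Thread* already exists */
        if (mutex == NULL) {                       /* illustrative misuse check           */
            record_misc_error(thr, "pthread_mutex_unlock with invalid argument");
            return;
        }
        pre_release(thr, (Addr)mutex, 0 /*!isRDWR*/);
    }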
2202 Thread* thr;
2205 thr = map_threads_maybe_lookup( tid );
2206 tl_assert(thr); /* cannot fail - Thread* must already exist */
2216 thr, "pthread_cond_destroy:"
2234 thr, "pthread_cond_destroy: destruction of unknown cond var");
2247 Thread* thr;
2255 thr = map_threads_maybe_lookup( tid );
2256 tl_assert(thr); /* cannot fail - Thread* must already exist */
2290 HG_(record_error_Misc)(thr,
2294 HG_(record_error_Misc)(thr,
2298 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2299 HG_(record_error_Misc)(thr,
2309 //HG_(record_error_Misc)( thr,
2315 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2323 Thread* thr;
2333 thr = map_threads_maybe_lookup( tid );
2334 tl_assert(thr); /* cannot fail - Thread* must already exist */
2344 thr,
2351 thr, "pthread_cond_{timed}wait called with mutex "
2357 thr, "pthread_cond_{timed}wait called with un-held mutex");
2360 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2363 thr, "pthread_cond_{timed}wait called with mutex "
2379 thr, "pthread_cond_{timed}wait: cond is associated "
2394 Thread* thr;
2402 thr = map_threads_maybe_lookup( tid );
2403 tl_assert(thr); /* cannot fail - Thread* must already exist */
2413 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2426 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2432 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
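
The condition-variable matches show how the signal-to-wakeup edge is modelled: the signaller publishes its happens-before state into the cond var's SO with libhb_so_send (after checking the associated mutex really is held), and a waiter returning from pthread_cond_wait imports it with libhb_so_recv. A sketch with stand-in SO operations; the CVInfo layout is illustrative, only its 'so' field is visible in the matches:

    #include <stdbool.h>

    typedef struct SO  { int unused_; } SO;           /* stand-in for libhb's sync object  */
    typedef struct Thr Thr;
    typedef struct Thread { Thr* hbthr; } Thread;
    typedef struct { SO* so; } CVInfo;                /* per-cond-var state (illustrative) */

    static void so_send(Thr* t, SO* so, bool strong) { (void)t; (void)so; (void)strong; }
    static void so_recv(Thr* t, SO* so, bool strong) { (void)t; (void)so; (void)strong; }

    /* pthread_cond_signal/broadcast: export the signaller's state ... */
    static void on_cond_signal_pre(Thread* thr, CVInfo* cvi) {
        so_send(thr->hbthr, cvi->so, true /*strong_send*/);
    }

    /* ... pthread_cond_wait return: import it, so everything the
       signaller did before signalling happens-before everything the
       woken waiter does afterwards. */
    static void on_cond_wait_post(Thread* thr, CVInfo* cvi) {
        so_recv(thr->hbthr, cvi->so, true /*strong_recv*/);
    }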
2487 Thread* thr;
2493 thr = map_threads_maybe_lookup( tid );
2495 tl_assert( HG_(is_sane_Thread)(thr) );
2501 thr, "pthread_rwlock_destroy with invalid argument" );
2510 thr, "pthread_rwlock_destroy of a locked mutex" );
2538 Thread* thr;
2546 thr = map_threads_maybe_lookup( tid );
2547 tl_assert(thr); /* cannot fail - Thread* must already exist */
2554 thr, "pthread_rwlock_{rd,rw}lock with a "
2563 Thread* thr;
2569 thr = map_threads_maybe_lookup( tid );
2570 tl_assert(thr); /* cannot fail - Thread* must already exist */
2574 thr,
2583 Thread* thr;
2588 thr = map_threads_maybe_lookup( tid );
2589 tl_assert(thr); /* cannot fail - Thread* must already exist */
2591 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2597 Thread* thr;
2601 thr = map_threads_maybe_lookup( tid );
2602 tl_assert(thr); /* cannot fail - Thread* must already exist */
2729 Thread* thr;
2735 thr = map_threads_maybe_lookup( tid );
2736 tl_assert(thr); /* cannot fail - Thread* must already exist */
2750 thr, "sem_init: initial value exceeds 10000; using 10000" );
2757 Thr* hbthr = thr->hbthr;
2776 Thread* thr;
2778 Thr* hbthr;
2784 thr = map_threads_maybe_lookup( tid );
2785 tl_assert(thr); /* cannot fail - Thread* must already exist */
2789 hbthr = thr->hbthr;
2804 Thread* thr;
2806 Thr* hbthr;
2812 thr = map_threads_maybe_lookup( tid );
2813 tl_assert(thr); /* cannot fail - Thread* must already exist */
2820 hbthr = thr->hbthr;
2830 thr, "Bug in libpthread: sem_wait succeeded on"
2906 Thread* thr;
2914 thr = map_threads_maybe_lookup( tid );
2915 tl_assert(thr); /* cannot fail - Thread* must already exist */
2919 thr, "pthread_barrier_init: 'count' argument is zero"
2925 thr, "pthread_barrier_init: invalid 'resizable' argument"
2934 thr, "pthread_barrier_init: barrier is already initialised"
2941 thr, "pthread_barrier_init: threads are waiting at barrier"
2961 Thread* thr;
2972 thr = map_threads_maybe_lookup( tid );
2973 tl_assert(thr); /* cannot fail - Thread* must already exist */
2980 thr, "pthread_barrier_destroy: barrier was never initialised"
2986 thr, "pthread_barrier_destroy: threads are waiting at barrier"
3020 Thr* hbthr = t->hbthr;
3026 Thr* hbthr = t->hbthr;
3082 Thread* thr;
3091 thr = map_threads_maybe_lookup( tid );
3092 tl_assert(thr); /* cannot fail - Thread* must already exist */
3099 thr, "pthread_barrier_wait: barrier is uninitialised"
3108 VG_(addToXA)( bar->waiting, &thr );
3125 Thread* thr;
3134 thr = map_threads_maybe_lookup( tid );
3135 tl_assert(thr); /* cannot fail - Thread* must already exist */
3142 thr, "pthread_barrier_resize: barrier is uninitialised"
3149 thr, "pthread_barrier_resize: barrier may not be resized"
3156 thr, "pthread_barrier_resize: 'newcount' argument is zero"
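
The barrier matches show arrivals being queued in bar->waiting, but not the crossing itself, so the sketch below is one plausible model rather than the real code: once 'count' threads have arrived, every waiter's state is merged into a single SO and then re-imported by every waiter, making all pre-barrier work happen-before all post-barrier work.

    #include <stdbool.h>

    typedef struct SO  { int unused_; } SO;
    typedef struct Thr Thr;
    typedef struct Thread { Thr* hbthr; } Thread;

    static void so_send(Thr* t, SO* so, bool strong) { (void)t; (void)so; (void)strong; }
    static void so_recv(Thr* t, SO* so, bool strong) { (void)t; (void)so; (void)strong; }

    typedef struct {                      /* illustrative per-barrier state */
        Thread*  waiting[64];             /* cf. bar->waiting               */
        unsigned n, count;
        SO       so;
    } ToyBarrier;

    static void on_barrier_wait_pre(Thread* thr, ToyBarrier* bar)
    {
        bar->waiting[bar->n++] = thr;     /* queue the arrival              */
        if (bar->n < bar->count)
            return;                       /* still waiting for stragglers   */
        for (unsigned i = 0; i < bar->n; i++)
            so_send(bar->waiting[i]->hbthr, &bar->so, false /*accumulate*/);
        for (unsigned i = 0; i < bar->n; i++)
            so_recv(bar->waiting[i]->hbthr, &bar->so, true);
        bar->n = 0;                       /* reset for the next round       */
    }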
3246 Thread* thr;
3253 thr = map_threads_maybe_lookup( tid );
3254 tl_assert(thr); /* cannot fail - Thread* must already exist */
3259 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3271 Thread* thr;
3278 thr = map_threads_maybe_lookup( tid );
3279 tl_assert(thr); /* cannot fail - Thread* must already exist */
3287 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3749 /* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3750 between 'lk' and the locks already held by 'thr' and issue a
3755 Thread* thr, /* NB: BEFORE lock is added */
3763 /* It may be that 'thr' already holds 'lk' and is recursively
3766 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3770 to any of the locks already held by thr, since if any such path
3775 other = laog__do_dfs_from_to(lk, thr->locksetA);
3798 thr, lk->guestaddr, other->guestaddr,
3848 thr, lk->guestaddr, other->guestaddr,
3854 (old, lk) | old <- locks already held by thr
3855 Since both old and lk are currently held by thr, their acquired_at
3859 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
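
laog__pre_thread_acquires_lock, matched above, is the lock-order check: if a DFS from the lock being acquired can reach any lock the thread already holds, the two locks have previously been taken in the opposite order and an order violation is reported with both acquired_at points; otherwise edges (old, lk) are added for every old lock currently held. A self-contained miniature of that check over a small adjacency matrix (sizes and names illustrative):

    #include <stdbool.h>

    #define NLOCKS 16

    /* edge[a][b] == true: some thread was seen acquiring b while already
       holding a, i.e. the observed order is "a before b". */
    static bool edge[NLOCKS][NLOCKS];

    /* DFS: is 'to' reachable from 'from' in the observed-order graph? */
    static bool reachable(int from, int to, bool seen[NLOCKS])
    {
        if (from == to) return true;
        seen[from] = true;
        for (int nxt = 0; nxt < NLOCKS; nxt++)
            if (edge[from][nxt] && !seen[nxt] && reachable(nxt, to, seen))
                return true;
        return false;
    }

    /* The thread holds the locks in 'held' (a bitmask) and is acquiring
       'lk'. Returns false on an order violation, otherwise records the
       new ordering edges. */
    static bool acquire_order_ok(unsigned held, int lk)
    {
        if ((held >> lk) & 1)
            return true;                      /* recursive re-acquisition: nothing to check */
        for (int old = 0; old < NLOCKS; old++) {
            if (!((held >> old) & 1)) continue;
            bool seen[NLOCKS] = { false };
            if (reachable(lk, old, seen))
                return false;                 /* lk was ordered before 'old' earlier on     */
        }
        for (int old = 0; old < NLOCKS; old++)
            if ((held >> old) & 1)
                edge[old][lk] = true;         /* 'old' is acquired before 'lk'              */
        return true;
    }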
3976 Thread* thr; /* allocating thread */
4022 md->thr = map_threads_lookup( tid );
4172 md_new->thr = map_threads_lookup( tid );
5101 Thread* thr = map_threads_maybe_lookup( tid );
5102 tl_assert( thr ); /* I must be mapped */
5107 HG_(record_error_Misc)( thr, buf );
5336 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5338 Thread* thr;
5342 thr = libhb_get_Thr_hgthread( hbt );
5343 tl_assert(thr);
5344 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5353 ExeContext* for_libhb__get_EC ( Thr* hbt )
5355 Thread* thr;
5359 thr = libhb_get_Thr_hgthread( hbt );
5360 tl_assert(thr);
5361 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
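
for_libhb__get_stacktrace and for_libhb__get_EC are callbacks in the other direction: the engine knows only Thr*, so when it needs a stack trace it asks the tool, which hops Thr* -> Thread* (libhb_get_Thr_hgthread) -> core ThreadId (the reverse lookup) and then lets the core unwind that thread. A sketch with stand-in helpers:

    #include <assert.h>

    typedef unsigned      ThreadId;
    typedef unsigned long Addr;
    typedef unsigned long UWord;
    typedef struct Thr Thr;
    typedef struct Thread { ThreadId coretid; } Thread;

    static Thread  the_thread = { 1 };
    static Thread* hgthread_of(Thr* hbt) { (void)hbt; return &the_thread; }  /* stand-in for libhb_get_Thr_hgthread */
    static void    core_get_stack(ThreadId tid, Addr* frames, UWord n) { (void)tid; (void)frames; (void)n; }

    static void get_stacktrace_for_engine(Thr* hbt, Addr* frames, UWord nRequest)
    {
        Thread* thr = hgthread_of(hbt);
        assert(thr != NULL);
        ThreadId tid = thr->coretid;              /* maybe_reverse_lookup_SLOW */
        core_get_stack(tid, frames, nRequest);    /* core unwinds that thread  */
    }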
5370 Thr* hbthr_root;