Lines Matching refs:Thr
160 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
164 static Thread* mk_Thread ( Thr* hbthr ) {
232 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
235 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
238 tl_assert(HG_(is_sane_Thread)(thr));
248 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
263 VG_(addToBag)( lk->heldBy, (Word)thr );
272 /* assert: .. and that thread is 'thr'. */
273 tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
275 VG_(addToBag)(lk->heldBy, (Word)thr);
286 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
289 tl_assert(HG_(is_sane_Thread)(thr));
304 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
313 VG_(addToBag)(lk->heldBy, (Word)thr);
317 VG_(addToBag)( lk->heldBy, (Word)thr );
323 /* Update 'lk' to reflect a release of it by 'thr'. This is done
327 static void lockN_release ( Lock* lk, Thread* thr )
331 tl_assert(HG_(is_sane_Thread)(thr));
336 b = VG_(delFromBag)(lk->heldBy, (Word)thr);
337 /* thr must actually have been a holder of lk */
352 Thread* thr;
359 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
360 tl_assert(HG_(is_sane_Thread)(thr));
362 thr->locksetA, (Word)lk ));
363 thr->locksetA
364 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );
368 thr->locksetW, (Word)lk ));
369 thr->locksetW
370 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
472 Thread* thr;
476 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
477 VG_(printf)("%lu:%p ", count, thr);
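The holder bag is walked with Valgrind's WordBag iterator. A minimal sketch of the pretty-printing loop above, assuming the usual VG_(initIterBag)/VG_(doneIterBag) bracketing from pub_tool_wordfm.h:

   /* Sketch: print each holder of 'lk' together with its multiplicity
      (a count > 1 means that thread holds the lock recursively). */
   Thread* thr;
   UWord   count;
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
      VG_(printf)("%lu:%p ", count, thr);
   VG_(doneIterBag)( lk->heldBy );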
549 static void initialise_data_structures ( Thr* hbthr_root )
551 Thread* thr;
591 thr = mk_Thread(hbthr_root);
592 thr->coretid = 1; /* FIXME: hardwires an assumption about the
595 libhb_set_Thr_hgthread(hbthr_root, thr);
598 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
599 tl_assert(thr->coretid != VG_INVALID_THREADID);
601 map_threads[thr->coretid] = thr;
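Assembled from the fragments above, initialise_data_structures binds the root libhb thread to core ThreadId 1 roughly as follows (a sketch; the lock-table and lockset-universe setup is omitted):

   /* Sketch: create the hg-side Thread for the root hb-thread and
      register it under coretid 1 (the hardwired assumption noted in
      the FIXME above). */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1;
   libhb_set_Thr_hgthread(hbthr_root, thr);
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);
   map_threads[thr->coretid] = thr;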
616 Thread* thr;
618 thr = map_threads[coretid];
619 return thr;
625 Thread* thr;
627 thr = map_threads[coretid];
628 tl_assert(thr);
629 return thr;
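The two forward lookups differ only in whether an unmapped ThreadId is tolerated. A sketch reconstructed from the fragments above; the sanity assertion on coretid is an assumption:

   /* Sketch: maybe_lookup returns NULL for an unmapped tid,
      while lookup asserts that the tid is mapped. */
   static Thread* map_threads_maybe_lookup ( ThreadId coretid )
   {
      Thread* thr;
      tl_assert(HG_(is_sane_ThreadId)(coretid));   /* assumption */
      thr = map_threads[coretid];
      return thr;
   }
   static Thread* map_threads_lookup ( ThreadId coretid )
   {
      Thread* thr;
      tl_assert(HG_(is_sane_ThreadId)(coretid));   /* assumption */
      thr = map_threads[coretid];
      tl_assert(thr);
      return thr;
   }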
632 /* Do a reverse lookup. Does not assert if 'thr' is not found in
634 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
637 tl_assert(HG_(is_sane_Thread)(thr));
641 tid = thr->coretid;
646 /* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
648 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
650 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
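Despite the _SLOW suffix, the reverse lookup here is just a read of the cached coretid plus sanity checks, and the asserting variant layers on top of the 'maybe' variant. A sketch; the exact assertions are assumptions:

   /* Sketch: reverse-map a Thread* to its core ThreadId. */
   static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
   {
      ThreadId tid;
      tl_assert(HG_(is_sane_Thread)(thr));
      tid = thr->coretid;
      tl_assert(HG_(is_sane_ThreadId)(tid));       /* assumption */
      return tid;
   }
   static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
   {
      ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
      tl_assert(tid != VG_INVALID_THREADID);       /* assumption: asserts if not found */
      return tid;
   }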
659 Thread* thr;
662 thr = map_threads[coretid];
663 tl_assert(thr);
743 // and the associated Segment has .thr == t
764 and seg->thr->lockset does not contain lk
779 seg->thr is a sane Thread
794 each thr in tset is a valid thread, which is non-dead
800 /* Return True iff 'thr' holds 'lk' in some mode. */
801 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
804 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
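An unheld lock keeps heldBy == NULL (as the NULL checks in the cond_signal handler further down suggest), so the holder test has to guard for that. A sketch under that assumption:

   /* Sketch: True iff 'thr' appears at least once in lk's holder bag. */
   static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
   {
      if (lk->heldBy == NULL)
         return False;                  /* lock currently unheld (assumption) */
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   }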
815 Thread* thr;
820 for (thr = admin_threads; thr; thr = thr->admin) {
821 if (!HG_(is_sane_Thread)(thr)) BAD("1");
822 wsA = thr->locksetA;
823 wsW = thr->locksetW;
833 if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
875 Thread* thr;
879 (Word*)&thr, &count )) {
882 tl_assert(HG_(is_sane_Thread)(thr));
883 if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
887 && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
890 && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
937 static void shadow_mem_scopy_range ( Thread* thr,
940 Thr* hbthr = thr->hbthr;
945 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
947 Thr* hbthr = thr->hbthr;
952 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
953 Thr* hbthr = thr->hbthr;
958 static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
960 libhb_srange_new( thr->hbthr, a, len );
963 static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
968 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
971 static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
976 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
979 static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
983 libhb_srange_untrack( thr->hbthr, aIN, len );
994 /* Create a new segment for 'thr', making it depend (.prev) on its
996 return both of them. Also update 'thr' so it references the new
1001 //zz Thread* thr )
1006 //zz tl_assert(HG_(is_sane_Thread)(thr));
1007 //zz cur_seg = map_segments_lookup( thr->csegid );
1009 //zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1011 //zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1014 //zz thr
1021 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1037 tl_assert(HG_(is_sane_Thread)(thr));
1041 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1045 tl_assert(thr->hbthr);
1051 lockN_acquire_writer( lk, thr );
1053 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1062 thr, "Bug in libpthread: write lock "
1071 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1073 thr, "Bug in libpthread: write lock "
1079 /* So the lock is already held in w-mode by 'thr'. That means this
1086 thr, "Bug in libpthread: recursive write lock "
1093 lockN_acquire_writer( lk, thr );
1096 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1103 laog__pre_thread_acquires_lock( thr, lk );
1106 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1107 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
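The write-acquisition handler ties these pieces together: record the holder in the lock's bag, pull the lock's happens-before state into the acquiring thread, run the lock-order check, then grow the thread's locksets. A condensed sketch of the uncontended path, using only the calls shown above (error paths and the lock-table lookup omitted):

   /* Sketch: 'thr' has just gained write-ownership of lock 'lk'. */
   lockN_acquire_writer( lk, thr );                /* add thr to lk->heldBy          */
   libhb_so_recv( thr->hbthr, lk->hbso,
                  True/*strong_recv*/ );           /* acquire edge from last release */
   laog__pre_thread_acquires_lock( thr, lk );      /* lock-order-graph check         */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );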
1118 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1134 tl_assert(HG_(is_sane_Thread)(thr));
1140 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1144 tl_assert(thr->hbthr);
1150 lockN_acquire_reader( lk, thr );
1152 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1160 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1168 lockN_acquire_reader( lk, thr );
1171 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1178 laog__pre_thread_acquires_lock( thr, lk );
1181 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1182 /* but don't update thr->locksetW, since lk is only rd-held */
1193 void evhH__pre_thread_releases_lock ( Thread* thr,
1209 tl_assert(HG_(is_sane_Thread)(thr));
1216 HG_(record_error_UnlockBogus)( thr, lock_ga );
1224 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1228 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1236 HG_(record_error_UnlockUnlocked)( thr, lock );
1237 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1238 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1248 n = VG_(elemBag)( lock->heldBy, (Word)thr );
1257 tl_assert(realOwner != thr);
1258 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1259 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1260 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1267 lockN_release( lock, thr );
1274 tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
1279 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1281 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1283 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1309 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1312 thr->locksetA
1313 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1314 thr->locksetW
1315 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
1317 tl_assert(thr->hbthr);
1321 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
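Release is the mirror image: after the error checks, the holder bag and locksets are shrunk and the ordering is published through the lock's SO for the next acquirer. A condensed sketch of the error-free, non-recursive path:

   /* Sketch: 'thr' fully releases 'lock'; was_heldW records whether the
      hold was in write mode, and is passed through to libhb. */
   lockN_release( lock, thr );                     /* drop thr from lock->heldBy */
   thr->locksetA = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
   thr->locksetW = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
   libhb_so_send( thr->hbthr, lock->hbso, was_heldW );  /* release edge */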
1374 Thread* thr;
1375 thr = get_current_Thread_in_C_C();
1376 if (LIKELY(thr))
1377 return thr;
1386 thr = map_threads_lookup( coretid );
1387 return thr;
1502 Thr* hbthr_p;
1503 Thr* hbthr_c;
1561 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1616 Thread* thr;
1618 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1619 tl_assert(!thr);
1624 thr = map_threads_maybe_lookup(i);
1625 if (!thr)
1629 tl_assert(thr->hbthr);
1630 libhb_async_exit(thr->hbthr);
1631 tl_assert(thr->coretid == i);
1632 thr->coretid = VG_INVALID_THREADID;
1643 Thr* hbthr_s;
1644 Thr* hbthr_q;
1764 Thread* thr;
1767 thr = get_current_Thread();
1768 tl_assert(thr);
1775 shadow_mem_cwrite_range(thr, a, len);
1777 shadow_mem_make_NoAccess_NoFX( thr, a, len );
1786 Thread* thr = get_current_Thread_in_C_C();
1787 Thr* hbthr = thr->hbthr;
1793 Thread* thr = get_current_Thread_in_C_C();
1794 Thr* hbthr = thr->hbthr;
1800 Thread* thr = get_current_Thread_in_C_C();
1801 Thr* hbthr = thr->hbthr;
1807 Thread* thr = get_current_Thread_in_C_C();
1808 Thr* hbthr = thr->hbthr;
1814 Thread* thr = get_current_Thread_in_C_C();
1815 Thr* hbthr = thr->hbthr;
1821 Thread* thr = get_current_Thread_in_C_C();
1822 Thr* hbthr = thr->hbthr;
1828 Thread* thr = get_current_Thread_in_C_C();
1829 Thr* hbthr = thr->hbthr;
1835 Thread* thr = get_current_Thread_in_C_C();
1836 Thr* hbthr = thr->hbthr;
1842 Thread* thr = get_current_Thread_in_C_C();
1843 Thr* hbthr = thr->hbthr;
1849 Thread* thr = get_current_Thread_in_C_C();
1850 Thr* hbthr = thr->hbthr;
1880 Thread* thr;
1886 thr = map_threads_maybe_lookup( tid );
1888 tl_assert( HG_(is_sane_Thread)(thr) );
1894 thr, "pthread_mutex_destroy with invalid argument" );
1903 thr, "pthread_mutex_destroy of a locked mutex" );
1929 Thread* thr;
1936 thr = map_threads_maybe_lookup( tid );
1937 tl_assert(thr); /* cannot fail - Thread* must already exist */
1942 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1951 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
1960 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1962 HG_(record_error_Misc)( thr, errstr );
1970 Thread* thr;
1975 thr = map_threads_maybe_lookup( tid );
1976 tl_assert(thr); /* cannot fail - Thread* must already exist */
1979 thr,
1988 Thread* thr;
1993 thr = map_threads_maybe_lookup( tid );
1994 tl_assert(thr); /* cannot fail - Thread* must already exist */
1996 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2002 Thread* thr;
2006 thr = map_threads_maybe_lookup( tid );
2007 tl_assert(thr); /* cannot fail - Thread* must already exist */
2023 Thread* thr;
2034 thr = map_threads_maybe_lookup( tid );
2036 tl_assert( HG_(is_sane_Thread)(thr) );
2043 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2154 Thread* thr;
2157 thr = map_threads_maybe_lookup( tid );
2158 tl_assert(thr); /* cannot fail - Thread* must already exist */
2167 HG_(record_error_Misc)(thr,
2175 HG_(record_error_Misc)(thr,
2188 Thread* thr;
2196 thr = map_threads_maybe_lookup( tid );
2197 tl_assert(thr); /* cannot fail - Thread* must already exist */
2231 HG_(record_error_Misc)(thr,
2235 HG_(record_error_Misc)(thr,
2239 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2240 HG_(record_error_Misc)(thr,
2250 //HG_(record_error_Misc)( thr,
2256 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2264 Thread* thr;
2274 thr = map_threads_maybe_lookup( tid );
2275 tl_assert(thr); /* cannot fail - Thread* must already exist */
2285 thr,
2292 thr, "pthread_cond_{timed}wait called with mutex "
2298 thr, "pthread_cond_{timed}wait called with un-held mutex");
2301 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
2304 thr, "pthread_cond_{timed}wait called with mutex "
2320 thr, "pthread_cond_{timed}wait: cond is associated "
2334 Thread* thr;
2342 thr = map_threads_maybe_lookup( tid );
2343 tl_assert(thr); /* cannot fail - Thread* must already exist */
2353 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2366 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2372 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2411 Thread* thr;
2417 thr = map_threads_maybe_lookup( tid );
2419 tl_assert( HG_(is_sane_Thread)(thr) );
2425 thr, "pthread_rwlock_destroy with invalid argument" );
2434 thr, "pthread_rwlock_destroy of a locked mutex" );
2462 Thread* thr;
2470 thr = map_threads_maybe_lookup( tid );
2471 tl_assert(thr); /* cannot fail - Thread* must already exist */
2478 thr, "pthread_rwlock_{rd,rw}lock with a "
2487 Thread* thr;
2493 thr = map_threads_maybe_lookup( tid );
2494 tl_assert(thr); /* cannot fail - Thread* must already exist */
2498 thr,
2507 Thread* thr;
2512 thr = map_threads_maybe_lookup( tid );
2513 tl_assert(thr); /* cannot fail - Thread* must already exist */
2515 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2521 Thread* thr;
2525 thr = map_threads_maybe_lookup( tid );
2526 tl_assert(thr); /* cannot fail - Thread* must already exist */
2653 Thread* thr;
2659 thr = map_threads_maybe_lookup( tid );
2660 tl_assert(thr); /* cannot fail - Thread* must already exist */
2674 thr, "sem_init: initial value exceeds 10000; using 10000" );
2681 Thr* hbthr = thr->hbthr;
2700 Thread* thr;
2702 Thr* hbthr;
2708 thr = map_threads_maybe_lookup( tid );
2709 tl_assert(thr); /* cannot fail - Thread* must already exist */
2713 hbthr = thr->hbthr;
2728 Thread* thr;
2730 Thr* hbthr;
2736 thr = map_threads_maybe_lookup( tid );
2737 tl_assert(thr); /* cannot fail - Thread* must already exist */
2744 hbthr = thr->hbthr;
2754 thr, "Bug in libpthread: sem_wait succeeded on"
2830 Thread* thr;
2838 thr = map_threads_maybe_lookup( tid );
2839 tl_assert(thr); /* cannot fail - Thread* must already exist */
2843 thr, "pthread_barrier_init: 'count' argument is zero"
2849 thr, "pthread_barrier_init: invalid 'resizable' argument"
2858 thr, "pthread_barrier_init: barrier is already initialised"
2865 thr, "pthread_barrier_init: threads are waiting at barrier"
2885 Thread* thr;
2896 thr = map_threads_maybe_lookup( tid );
2897 tl_assert(thr); /* cannot fail - Thread* must already exist */
2904 thr, "pthread_barrier_destroy: barrier was never initialised"
2910 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2944 Thr* hbthr = t->hbthr;
2950 Thr* hbthr = t->hbthr;
3006 Thread* thr;
3015 thr = map_threads_maybe_lookup( tid );
3016 tl_assert(thr); /* cannot fail - Thread* must already exist */
3023 thr, "pthread_barrier_wait: barrier is uninitialised"
3032 VG_(addToXA)( bar->waiting, &thr );
3049 Thread* thr;
3058 thr = map_threads_maybe_lookup( tid );
3059 tl_assert(thr); /* cannot fail - Thread* must already exist */
3066 thr, "pthread_barrier_resize: barrier is uninitialised"
3073 thr, "pthread_barrier_resize: barrier may not be resized"
3080 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3170 Thread* thr;
3177 thr = map_threads_maybe_lookup( tid );
3178 tl_assert(thr); /* cannot fail - Thread* must already exist */
3183 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3195 Thread* thr;
3202 thr = map_threads_maybe_lookup( tid );
3203 tl_assert(thr); /* cannot fail - Thread* must already exist */
3211 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3673 /* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3674 between 'lk' and the locks already held by 'thr' and issue a
3679 Thread* thr, /* NB: BEFORE lock is added */
3687 /* It may be that 'thr' already holds 'lk' and is recursively
3690 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3694 to any of the locks already held by thr, since if any such path
3699 other = laog__do_dfs_from_to(lk, thr->locksetA);
3722 thr, lk->guestaddr, other->guestaddr,
3727 thr, lk->guestaddr, other->guestaddr,
3733 (old, lk) | old <- locks already held by thr
3734 Since both old and lk are currently held by thr, their acquired_at
3738 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
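laog__pre_thread_acquires_lock does a DFS in the lock-acquisition-order graph from 'lk' back towards the locks 'thr' already holds; reaching one of them means the two locks have previously been taken in the opposite order. A sketch of the core check, reconstructed from the fragments above (error reporting and edge maintenance omitted):

   /* Sketch: detect an inconsistent acquisition order before 'lk' is
      added to thr's locksets. */
   Lock* other;
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;   /* recursive re-acquisition of a lock we already hold */
   other = laog__do_dfs_from_to( lk, thr->locksetA );
   if (other) {
      /* A path lk -> .. -> other exists in the graph, yet 'other' is
         currently held while 'lk' is being acquired: report a
         lock-order violation for (lk->guestaddr, other->guestaddr),
         as in the record_error calls above. */
   }
   /* Otherwise, add edges (old, lk) for every lock 'old' already held
      by thr, so later acquisitions can be checked against this order. */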
3855 Thread* thr; /* allocating thread */
3898 md->thr = map_threads_lookup( tid );
4048 md_new->thr = map_threads_lookup( tid );
4845 Thread* thr = map_threads_maybe_lookup( tid );
4846 tl_assert( thr ); /* I must be mapped */
4851 HG_(record_error_Misc)( thr, buf );
5067 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5069 Thread* thr;
5073 thr = libhb_get_Thr_hgthread( hbt );
5074 tl_assert(thr);
5075 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5084 ExeContext* for_libhb__get_EC ( Thr* hbt )
5086 Thread* thr;
5090 thr = libhb_get_Thr_hgthread( hbt );
5091 tl_assert(thr);
5092 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
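Both for_libhb callbacks map a libhb Thr* back to a core ThreadId the same way: fetch the hg-side Thread with libhb_get_Thr_hgthread, then reverse-look-up its coretid. A sketch of for_libhb__get_EC under that pattern; the VG_(record_ExeContext) arguments are an assumption:

   /* Sketch: produce an ExeContext for the thread behind 'hbt'. */
   ExeContext* for_libhb__get_EC ( Thr* hbt )
   {
      Thread*  thr;
      ThreadId tid;
      thr = libhb_get_Thr_hgthread( hbt );
      tl_assert(thr);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      /* assumption: capture the current stack for 'tid' */
      return VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
   }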
5101 Thr* hbthr_root;