
Lines Matching defs:thr

172 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
176 static Thread* mk_Thread ( Thr* hbthr ) {
250 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
253 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
256 tl_assert(HG_(is_sane_Thread)(thr));
266 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
281 VG_(addToBag)( lk->heldBy, (UWord)thr );
290 /* assert: .. and that thread is 'thr'. */
291 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
293 VG_(addToBag)(lk->heldBy, (UWord)thr);
304 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
307 tl_assert(HG_(is_sane_Thread)(thr));
322 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
331 VG_(addToBag)(lk->heldBy, (UWord)thr);
335 VG_(addToBag)( lk->heldBy, (UWord)thr );
341 /* Update 'lk' to reflect a release of it by 'thr'. This is done
345 static void lockN_release ( Lock* lk, Thread* thr )
349 tl_assert(HG_(is_sane_Thread)(thr));
354 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
355 /* thr must actually have been a holder of lk */
370 Thread* thr;
377 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
378 tl_assert(HG_(is_sane_Thread)(thr));
380 thr->locksetA, (UWord)lk ));
381 thr->locksetA
382 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
386 thr->locksetW, (UWord)lk ));
387 thr->locksetW
388 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
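
The fragments from lines 250-388 show the lock-holder bookkeeping: lk->heldBy is a bag (multiset), so a recursive write-acquire by the same thread just raises that thread's count, and a release must actually find the thread in the bag. Below is a minimal sketch of that scheme, with a toy fixed-size bag standing in for VG_(addToBag)/VG_(delFromBag); every name here is hypothetical, not Valgrind's.

    #include <assert.h>
    #include <stddef.h>

    #define MAX_HOLDERS 8

    typedef struct { void* holder[MAX_HOLDERS]; int count[MAX_HOLDERS]; } Bag;
    typedef struct { Bag heldBy; } LockSk;
    typedef struct { int dummy; } ThreadSk;

    static void bag_add(Bag* b, void* t) {
        for (int i = 0; i < MAX_HOLDERS; i++)
            if (b->holder[i] == t) { b->count[i]++; return; }  /* recursive acquire */
        for (int i = 0; i < MAX_HOLDERS; i++)
            if (b->holder[i] == NULL) { b->holder[i] = t; b->count[i] = 1; return; }
        assert(0 && "bag full");
    }

    static int bag_del(Bag* b, void* t) {   /* nonzero iff t was a holder */
        for (int i = 0; i < MAX_HOLDERS; i++)
            if (b->holder[i] == t) {
                if (--b->count[i] == 0) b->holder[i] = NULL;
                return 1;
            }
        return 0;
    }

    static void lockN_acquire_writer_sk(LockSk* lk, ThreadSk* thr)
    { bag_add(&lk->heldBy, thr); }

    static void lockN_release_sk(LockSk* lk, ThreadSk* thr)
    { int was_held = bag_del(&lk->heldBy, thr); assert(was_held); }
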
511 Thread* thr;
515 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
517 VG_(printf)("%lu:%p ", count, thr);
521 count, thr->errmsg_index);
522 if (thr->coretid == VG_INVALID_THREADID)
525 VG_(printf)("tid %u ", thr->coretid);
601 static void initialise_data_structures ( Thr* hbthr_root )
603 Thread* thr;
639 thr = mk_Thread(hbthr_root);
640 thr->coretid = 1; /* FIXME: hardwires an assumption about the
643 libhb_set_Thr_hgthread(hbthr_root, thr);
646 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
647 tl_assert(thr->coretid != VG_INVALID_THREADID);
649 map_threads[thr->coretid] = thr;
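
Lines 601-649 wire up the root thread: a Helgrind Thread is made for the libhb Thr, the two are cross-linked, and the Thread is registered in map_threads under its coretid. A minimal sketch of that two-way linkage, assuming only what the fragments show; mk_thread/bind_root are hypothetical stand-ins for mk_Thread and initialise_data_structures.

    #include <assert.h>
    #include <stdlib.h>

    typedef struct ThreadSk_ ThreadSk;
    typedef struct ThrSk_ { ThreadSk* hgthread; } ThrSk;      /* libhb's view   */
    struct ThreadSk_ { ThrSk* hbthr; unsigned coretid; };     /* Helgrind's view */

    #define VG_N_THREADS        256
    #define VG_INVALID_THREADID 0
    static ThreadSk* map_threads[VG_N_THREADS];

    static ThreadSk* mk_thread(ThrSk* hbthr) {
        ThreadSk* thr = calloc(1, sizeof *thr);
        assert(thr);
        thr->hbthr = hbthr;            /* Helgrind -> libhb */
        hbthr->hgthread = thr;         /* libhb -> Helgrind */
        return thr;
    }

    static void bind_root(ThrSk* hbthr_root) {
        ThreadSk* thr = mk_thread(hbthr_root);
        thr->coretid = 1;              /* hardwired, as the FIXME at 640 notes */
        assert(thr->coretid != VG_INVALID_THREADID);
        map_threads[thr->coretid] = thr;
    }
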
664 Thread* thr;
666 thr = map_threads[coretid];
667 return thr;
673 Thread* thr;
675 thr = map_threads[coretid];
676 tl_assert(thr);
677 return thr;
680 /* Do a reverse lookup. Does not assert if 'thr' is not found in
682 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
685 tl_assert(HG_(is_sane_Thread)(thr));
689 tid = thr->coretid;
694 /* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
696 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
698 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
707 Thread* thr;
710 thr = map_threads[coretid];
711 tl_assert(thr);
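
Lines 664-711 follow a consistent lookup discipline: the "maybe" variants may return NULL (or VG_INVALID_THREADID), while the plain variants assert success. A sketch of the pattern, with hypothetical names:

    #include <assert.h>
    #include <stddef.h>

    typedef struct { unsigned coretid; } ThreadSk;
    #define VG_N_THREADS 256
    static ThreadSk* map_threads[VG_N_THREADS];

    static ThreadSk* maybe_lookup(unsigned coretid)   /* may return NULL */
    { return map_threads[coretid]; }

    static ThreadSk* lookup(unsigned coretid)         /* asserts instead */
    { ThreadSk* thr = map_threads[coretid]; assert(thr); return thr; }

    static unsigned maybe_reverse_lookup(ThreadSk* thr)
    { assert(thr); return thr->coretid; }  /* per line 689, the tid is cached
                                              on the Thread itself */
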
715 static void HG_(thread_enter_synchr)(Thread *thr) {
716 tl_assert(thr->synchr_nesting >= 0);
718 thr->synchr_nesting += 1;
722 static void HG_(thread_leave_synchr)(Thread *thr) {
724 thr->synchr_nesting -= 1;
726 tl_assert(thr->synchr_nesting >= 0);
729 static void HG_(thread_enter_pthread_create)(Thread *thr) {
730 tl_assert(thr->pthread_create_nesting_level >= 0);
731 thr->pthread_create_nesting_level += 1;
734 static void HG_(thread_leave_pthread_create)(Thread *thr) {
735 tl_assert(thr->pthread_create_nesting_level > 0);
736 thr->pthread_create_nesting_level -= 1;
740 Thread *thr = map_threads_maybe_lookup(tid);
741 return thr->pthread_create_nesting_level;
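
Lines 715-741 maintain two per-thread nesting counters with paired enter/leave functions whose asserts keep the pairing balanced. The same shape, reduced to a sketch; note the asymmetry copied from the fragments (leave_synchr asserts after decrementing, leave_create before):

    #include <assert.h>

    typedef struct { int synchr_nesting; int pthread_create_nesting_level; } ThreadSk;

    static void enter_synchr(ThreadSk* t)
    { assert(t->synchr_nesting >= 0); t->synchr_nesting += 1; }

    static void leave_synchr(ThreadSk* t)
    { t->synchr_nesting -= 1; assert(t->synchr_nesting >= 0); }

    static void enter_create(ThreadSk* t)
    { assert(t->pthread_create_nesting_level >= 0); t->pthread_create_nesting_level += 1; }

    static void leave_create(ThreadSk* t)
    { assert(t->pthread_create_nesting_level > 0); t->pthread_create_nesting_level -= 1; }
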
819 // and the associated Segment has .thr == t
840 and seg->thr->lockset does not contain lk
855 seg->thr is a sane Thread
870 each thr in tset is a valid thread, which is non-dead
876 /* Return True iff 'thr' holds 'lk' in some mode. */
877 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
880 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
891 Thread* thr;
896 for (thr = admin_threads; thr; thr = thr->admin) {
897 if (!HG_(is_sane_Thread)(thr)) BAD("1");
898 wsA = thr->locksetA;
899 wsW = thr->locksetW;
909 if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
951 Thread* thr;
955 (UWord*)&thr, &count )) {
958 tl_assert(HG_(is_sane_Thread)(thr));
959 if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
963 && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
966 && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
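
Lines 876-966 check a two-way invariant: every lock in a thread's lockset must list that thread in its heldBy bag, and every holder recorded on a lock must carry the lock in its lockset (locksetW only for write-holders). A toy version of both directions, with sets and bags reduced to arrays and all types hypothetical:

    #include <assert.h>
    #include <stddef.h>

    #define MAXL 4
    #define MAXT 4

    typedef struct ThreadSk_ ThreadSk;
    typedef struct LockSk_   LockSk;
    struct LockSk_   { ThreadSk* holders[MAXT]; };
    struct ThreadSk_ { LockSk*   locksetA[MAXL]; };

    static int lock_held_by(LockSk* lk, ThreadSk* t) {
        for (int i = 0; i < MAXT; i++)
            if (lk->holders[i] == t) return 1;
        return 0;
    }

    static int in_lockset(ThreadSk* t, LockSk* lk) {
        for (int i = 0; i < MAXL; i++)
            if (t->locksetA[i] == lk) return 1;
        return 0;
    }

    /* direction 1: a thread's lockset only names locks it really holds */
    static void check_thread(ThreadSk* t) {
        for (int i = 0; i < MAXL; i++)
            if (t->locksetA[i]) assert(lock_held_by(t->locksetA[i], t));
    }

    /* direction 2: a lock's holders all carry it in their locksets */
    static void check_lock(LockSk* lk) {
        for (int i = 0; i < MAXT; i++)
            if (lk->holders[i]) assert(in_lockset(lk->holders[i], lk));
    }
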
1013 static void shadow_mem_scopy_range ( Thread* thr,
1016 Thr* hbthr = thr->hbthr;
1021 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1023 Thr* hbthr = thr->hbthr;
1028 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1029 Thr* hbthr = thr->hbthr;
1034 static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1036 libhb_srange_new( thr->hbthr, a, len );
1039 static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
1044 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1047 static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
1052 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
1055 static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1059 libhb_srange_untrack( thr->hbthr, aIN, len );
1070 /* Create a new segment for 'thr', making it depend (.prev) on its
1072 return both of them. Also update 'thr' so it references the new
1077 //zz Thread* thr )
1082 //zz tl_assert(HG_(is_sane_Thread)(thr));
1083 //zz cur_seg = map_segments_lookup( thr->csegid );
1085 //zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1087 //zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1090 //zz thr->csegid = *new_segidP;
1097 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1113 tl_assert(HG_(is_sane_Thread)(thr));
1117 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1121 tl_assert(thr->hbthr);
1127 lockN_acquire_writer( lk, thr );
1129 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1138 thr, "Bug in libpthread: write lock "
1147 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1149 thr, "Bug in libpthread: write lock "
1155 /* So the lock is already held in w-mode by 'thr'. That means this
1162 thr, "Bug in libpthread: recursive write lock "
1169 lockN_acquire_writer( lk, thr );
1172 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1179 laog__pre_thread_acquires_lock( thr, lk );
1182 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1183 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
1194 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1210 tl_assert(HG_(is_sane_Thread)(thr));
1216 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1220 tl_assert(thr->hbthr);
1226 lockN_acquire_reader( lk, thr );
1228 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1236 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1244 lockN_acquire_reader( lk, thr );
1247 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1254 laog__pre_thread_acquires_lock( thr, lk );
1257 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1258 /* but don't update thr->locksetW, since lk is only rd-held */
1269 void evhH__pre_thread_releases_lock ( Thread* thr,
1285 tl_assert(HG_(is_sane_Thread)(thr));
1292 HG_(record_error_UnlockBogus)( thr, lock_ga );
1300 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1304 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1312 HG_(record_error_UnlockUnlocked)( thr, lock );
1313 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1314 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1324 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
1333 tl_assert(realOwner != thr);
1334 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1335 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1336 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1343 lockN_release( lock, thr );
1350 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1355 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1357 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1359 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1385 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1388 thr->locksetA
1389 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
1390 thr->locksetW
1391 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
1393 tl_assert(thr->hbthr);
1397 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
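
Lines 1097-1397 give the lock protocol its happens-before edges: a release does libhb_so_send on the lock's SO, and a later acquire does libhb_so_recv (strong for write, weak for read). A minimal sketch with the SO reduced to one scalar timestamp and each thread to one logical clock; the real libhb uses vector clocks, and the strong/weak distinction collapses in this reduction. Names are hypothetical.

    #include <stddef.h>

    typedef struct { unsigned long stamp; int valid; } SOSk;
    typedef struct { unsigned long clock; } ThrSk;

    /* release: publish the releaser's point in logical time on the lock */
    static void lock_release_hb(ThrSk* t, SOSk* so)
    { t->clock += 1; so->stamp = t->clock; so->valid = 1; }

    /* acquire: everything before the release now happens-before us */
    static void lock_acquire_hb(ThrSk* t, SOSk* so)
    { if (so->valid && so->stamp > t->clock) t->clock = so->stamp; }
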
1450 Thread* thr;
1451 thr = get_current_Thread_in_C_C();
1452 if (LIKELY(thr))
1453 return thr;
1462 thr = map_threads_lookup( coretid );
1463 return thr;
1468 Thread *thr = get_current_Thread();
1471 shadow_mem_make_New( thr, a, len );
1474 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1475 shadow_mem_make_Untracked( thr, a, len );
1480 Thread *thr = get_current_Thread();
1483 shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
1486 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1487 shadow_mem_make_Untracked( thr, a, len );
1492 Thread *thr = get_current_Thread();
1495 shadow_mem_make_New( thr, a, len );
1498 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1499 shadow_mem_make_Untracked( thr, a, len );
1505 Thread *thr = get_current_Thread();
1510 shadow_mem_make_New( thr, a, len );
1511 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1512 shadow_mem_make_Untracked( thr, a, len );
1575 Thread *thr = get_current_Thread();
1576 if (LIKELY(thr->synchr_nesting == 0))
1577 shadow_mem_scopy_range( thr , src, dst, len );
1592 Thr* hbthr_p;
1593 Thr* hbthr_c;
1660 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1715 Thread* thr;
1717 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1718 tl_assert(!thr);
1723 thr = map_threads_maybe_lookup(i);
1724 if (!thr)
1728 tl_assert(thr->hbthr);
1729 libhb_async_exit(thr->hbthr);
1730 tl_assert(thr->coretid == i);
1731 thr->coretid = VG_INVALID_THREADID;
1738 void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
1772 Thr* hbthr_s;
1773 Thr* hbthr_q;
1818 Thread *thr = map_threads_lookup(tid);
1819 if (LIKELY(thr->synchr_nesting == 0))
1820 shadow_mem_cread_range(thr, a, size);
1838 Thread *thr = map_threads_lookup(tid);
1840 if (LIKELY(thr->synchr_nesting == 0))
1841 shadow_mem_cread_range( thr, a, len+1 );
1852 Thread *thr = map_threads_lookup(tid);
1853 if (LIKELY(thr->synchr_nesting == 0))
1854 shadow_mem_cwrite_range(thr, a, size);
1872 Thread* thr;
1875 thr = get_current_Thread();
1876 tl_assert(thr);
1883 if (LIKELY(thr->synchr_nesting == 0))
1884 shadow_mem_cwrite_range(thr
1886 shadow_mem_make_NoAccess_AHAE( thr, a, len );
1888 shadow_mem_make_NoAccess_NoFX( thr, a, len );
1916 Thread* thr = get_current_Thread_in_C_C();
1917 Thr* hbthr = thr->hbthr;
1918 if (LIKELY(thr->synchr_nesting == 0))
1924 Thread* thr = get_current_Thread_in_C_C();
1925 Thr* hbthr = thr->hbthr;
1926 if (LIKELY(thr->synchr_nesting == 0))
1932 Thread* thr = get_current_Thread_in_C_C();
1933 Thr* hbthr = thr->hbthr;
1934 if (LIKELY(thr->synchr_nesting == 0))
1940 Thread* thr = get_current_Thread_in_C_C();
1941 Thr* hbthr = thr->hbthr;
1942 if (LIKELY(thr->synchr_nesting == 0))
1948 Thread* thr = get_current_Thread_in_C_C();
1949 Thr* hbthr = thr->hbthr;
1950 if (LIKELY(thr->synchr_nesting == 0))
1956 Thread* thr = get_current_Thread_in_C_C();
1957 Thr* hbthr = thr->hbthr;
1958 if (LIKELY(thr->synchr_nesting == 0))
1964 Thread* thr = get_current_Thread_in_C_C();
1965 Thr* hbthr = thr->hbthr;
1966 if (LIKELY(thr->synchr_nesting == 0))
1972 Thread* thr = get_current_Thread_in_C_C();
1973 Thr* hbthr = thr->hbthr;
1974 if (LIKELY(thr->synchr_nesting == 0))
1980 Thread* thr = get_current_Thread_in_C_C();
1981 Thr* hbthr = thr->hbthr;
1982 if (LIKELY(thr->synchr_nesting == 0))
1988 Thread* thr = get_current_Thread_in_C_C();
1989 Thr* hbthr = thr->hbthr;
1990 if (LIKELY(thr->synchr_nesting == 0))
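
Lines 1916-1990 are ten near-identical per-size access hooks, each fetching the current thread's hbthr and gating the shadow access on thr->synchr_nesting == 0. One compact way to express the pattern, as a sketch with hypothetical names and no-op backends:

    #include <stdint.h>

    typedef struct { int synchr_nesting; void* hbthr; } ThreadSk;

    static void cread_sz (void* hbthr, uintptr_t a, unsigned sz) { (void)hbthr; (void)a; (void)sz; }
    static void cwrite_sz(void* hbthr, uintptr_t a, unsigned sz) { (void)hbthr; (void)a; (void)sz; }

    #define DEF_HOOK(rw, sz)                                                 \
        static void mem_help_c##rw##_##sz(ThreadSk* thr, uintptr_t a) {      \
            if (thr->synchr_nesting == 0)    /* skip inside sync calls */    \
                c##rw##_sz(thr->hbthr, a, sz);                               \
        }

    DEF_HOOK(read, 1) DEF_HOOK(read, 2) DEF_HOOK(read, 4) DEF_HOOK(read, 8)
    DEF_HOOK(write, 1) DEF_HOOK(write, 2) DEF_HOOK(write, 4) DEF_HOOK(write, 8)
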
2021 Thread* thr;
2028 thr = map_threads_maybe_lookup( tid );
2030 tl_assert( HG_(is_sane_Thread)(thr) );
2044 thr, "pthread_mutex_destroy with invalid argument" );
2053 thr, "pthread_mutex_destroy of a locked mutex" );
2080 Thread* thr;
2087 thr = map_threads_maybe_lookup( tid );
2088 tl_assert(thr); /* cannot fail - Thread* must already exist */
2093 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2102 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2111 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2113 HG_(record_error_Misc)( thr, errstr );
2121 Thread* thr;
2126 thr = map_threads_maybe_lookup( tid );
2127 tl_assert(thr); /* cannot fail - Thread* must already exist */
2130 thr,
2139 Thread* thr;
2144 thr = map_threads_maybe_lookup( tid );
2145 tl_assert(thr); /* cannot fail - Thread* must already exist */
2147 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2153 Thread* thr;
2157 thr = map_threads_maybe_lookup( tid );
2158 tl_assert(thr); /* cannot fail - Thread* must already exist */
2174 Thread* thr;
2185 thr = map_threads_maybe_lookup( tid );
2187 tl_assert( HG_(is_sane_Thread)(thr) );
2194 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2305 Thread* thr;
2308 thr = map_threads_maybe_lookup( tid );
2309 tl_assert(thr); /* cannot fail - Thread* must already exist */
2319 thr, "pthread_cond_destroy:"
2337 thr, "pthread_cond_destroy: destruction of unknown cond var");
2350 Thread* thr;
2358 thr = map_threads_maybe_lookup( tid );
2359 tl_assert(thr); /* cannot fail - Thread* must already exist */
2393 HG_(record_error_Misc)(thr,
2397 HG_(record_error_Misc)(thr,
2401 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2402 HG_(record_error_Misc)(thr,
2412 //HG_(record_error_Misc)( thr,
2418 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2426 Thread* thr;
2436 thr = map_threads_maybe_lookup( tid );
2437 tl_assert(thr); /* cannot fail - Thread* must already exist */
2447 thr,
2454 thr, "pthread_cond_{timed}wait called with mutex "
2460 thr, "pthread_cond_{timed}wait called with un-held mutex");
2463 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2466 thr, "pthread_cond_{timed}wait called with mutex "
2482 thr, "pthread_cond_{timed}wait: cond is associated "
2497 Thread* thr;
2505 thr = map_threads_maybe_lookup( tid );
2506 tl_assert(thr); /* cannot fail - Thread* must already exist */
2516 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2529 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2535 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
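
Lines 2350-2535 sanity-check pthread_cond_signal and pthread_cond_{timed}wait against the associated mutex: signalling or waiting while the mutex is unheld, or held by a different thread, draws a Misc error. A toy version of the held-by precondition, with hypothetical types and a stderr stand-in for HG_(record_error_Misc):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct ThreadSk_ { const char* name; } ThreadSk;
    typedef struct { ThreadSk* holders[8]; } LockSk;

    static int held_by(LockSk* lk, ThreadSk* t) {
        for (int i = 0; i < 8; i++)
            if (lk->holders[i] == t) return 1;
        return 0;
    }

    static void record_error_misc(ThreadSk* t, const char* msg)
    { fprintf(stderr, "thread %s: %s\n", t->name, msg); }

    /* common precondition for signal and wait: the caller must hold the
       mutex associated with the condition variable */
    static void check_assoc_mutex(ThreadSk* thr, LockSk* mx, const char* who) {
        if (!held_by(mx, thr))
            record_error_misc(thr, who);  /* e.g. "...called with un-held mutex" */
    }
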
2590 Thread* thr;
2596 thr = map_threads_maybe_lookup( tid );
2598 tl_assert( HG_(is_sane_Thread)(thr) );
2604 thr, "pthread_rwlock_destroy with invalid argument" );
2613 thr, "pthread_rwlock_destroy of a locked rwlock" );
2641 Thread* thr;
2649 thr = map_threads_maybe_lookup( tid );
2650 tl_assert(thr); /* cannot fail - Thread* must already exist */
2657 thr, "pthread_rwlock_{rd,rw}lock with a "
2666 Thread* thr;
2672 thr = map_threads_maybe_lookup( tid );
2673 tl_assert(thr); /* cannot fail - Thread* must already exist */
2677 thr,
2686 Thread* thr;
2691 thr = map_threads_maybe_lookup( tid );
2692 tl_assert(thr); /* cannot fail - Thread* must already exist */
2694 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2700 Thread* thr;
2704 thr = map_threads_maybe_lookup( tid );
2705 tl_assert(thr); /* cannot fail - Thread* must already exist */
2831 Thread* thr;
2837 thr = map_threads_maybe_lookup( tid );
2838 tl_assert(thr); /* cannot fail - Thread* must already exist */
2852 thr, "sem_init: initial value exceeds 10000; using 10000" );
2859 Thr* hbthr = thr->hbthr;
2878 Thread* thr;
2880 Thr* hbthr;
2886 thr = map_threads_maybe_lookup( tid );
2887 tl_assert(thr); /* cannot fail - Thread* must already exist */
2891 hbthr = thr->hbthr;
2906 Thread* thr;
2908 Thr* hbthr;
2914 thr = map_threads_maybe_lookup( tid );
2915 tl_assert(thr); /* cannot fail - Thread* must already exist */
2922 hbthr = thr->hbthr;
2932 thr, "Bug in libpthread: sem_wait succeeded on"
3006 Thread* thr;
3014 thr = map_threads_maybe_lookup( tid );
3015 tl_assert(thr); /* cannot fail - Thread* must already exist */
3019 thr, "pthread_barrier_init: 'count' argument is zero"
3025 thr, "pthread_barrier_init: invalid 'resizable' argument"
3034 thr, "pthread_barrier_init: barrier is already initialised"
3041 thr, "pthread_barrier_init: threads are waiting at barrier"
3060 Thread* thr;
3071 thr = map_threads_maybe_lookup( tid );
3072 tl_assert(thr); /* cannot fail - Thread* must already exist */
3079 thr, "pthread_barrier_destroy: barrier was never initialised"
3085 thr, "pthread_barrier_destroy: threads are waiting at barrier"
3119 Thr* hbthr = t->hbthr;
3125 Thr* hbthr = t->hbthr;
3181 Thread* thr;
3190 thr = map_threads_maybe_lookup( tid );
3191 tl_assert(thr); /* cannot fail - Thread* must already exist */
3198 thr, "pthread_barrier_wait: barrier is uninitialised"
3207 VG_(addToXA)( bar->waiting, &thr );
3224 Thread* thr;
3233 thr = map_threads_maybe_lookup( tid );
3234 tl_assert(thr); /* cannot fail - Thread* must already exist */
3241 thr, "pthread_barrier_resize: barrier is uninitialised"
3248 thr, "pthread_barrier_resize: barrier may not be resized"
3255 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3344 Thread* thr;
3351 thr = map_threads_maybe_lookup( tid );
3352 tl_assert(thr); /* cannot fail - Thread* must already exist */
3357 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3369 Thread* thr;
3376 thr = map_threads_maybe_lookup( tid );
3377 tl_assert(thr); /* cannot fail - Thread* must already exist */
3385 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3417 Thread *thr = map_threads_maybe_lookup(tid);
3418 tl_assert(thr != NULL);
3421 if ((bindflag & thr->bind_guard_flag) == 0) {
3422 thr->bind_guard_flag |= bindflag;
3423 HG_(thread_enter_synchr)(thr);
3425 HG_(thread_enter_pthread_create)(thr);
3437 Thread *thr = map_threads_maybe_lookup(tid);
3438 tl_assert(thr != NULL);
3441 if ((thr->bind_guard_flag & bindflag) != 0) {
3442 thr->bind_guard_flag &= ~bindflag;
3443 HG_(thread_leave_synchr)(thr);
3444 HG_(thread_leave_pthread_create)(thr);
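
Lines 3417-3444 make the dynamic-linker bind guards idempotent per flag bit: enter sets the bit and bumps the nesting counters only on first entry, leave clears it and drops them only if the bit was set. The same logic in isolation (only the synchr counter shown; hypothetical types):

    typedef struct { unsigned bind_guard_flag; int synchr_nesting; } ThreadSk;

    static void bind_guard_enter(ThreadSk* thr, unsigned bindflag) {
        if ((bindflag & thr->bind_guard_flag) == 0) {  /* first entry for this bit */
            thr->bind_guard_flag |= bindflag;
            thr->synchr_nesting += 1;
        }
    }

    static void bind_guard_leave(ThreadSk* thr, unsigned bindflag) {
        if ((thr->bind_guard_flag & bindflag) != 0) {  /* only if enter saw it */
            thr->bind_guard_flag &= ~bindflag;
            thr->synchr_nesting -= 1;
        }
    }
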
3886 /* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3887 between 'lk' and the locks already held by 'thr' and issue a
3892 Thread* thr, /* NB: BEFORE lock is added */
3900 /* It may be that 'thr' already holds 'lk' and is recursively
3903 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3907 to any of the locks already held by thr, since if any such path
3912 other = laog__do_dfs_from_to(lk, thr->locksetA);
3935 thr, lk, other,
3985 thr, lk, other,
3991 (old, lk) | old <- locks already held by thr
3992 Since both old and lk are currently held by thr, their acquired_at
3996 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
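
Lines 3886-3996 describe the lock-order (LAOG) check: when thr acquires lk and does not already hold it, a DFS asks whether any lock thr already holds is reachable from lk in the acquisition-order graph; if so, the two locks have at some point been taken in both orders. Otherwise edges (old, lk) are added for every old lock held. A minimal sketch with an adjacency matrix; locks are small integers, and reporting is left to the caller:

    #include <string.h>

    #define NLOCKS 8
    /* edge[a][b] != 0 means "a was held while b was acquired" */
    static unsigned char edge[NLOCKS][NLOCKS];

    /* DFS from 'from': can we reach any lock in heldset? */
    static int reaches_held(int from, const unsigned char* heldset, unsigned char* seen) {
        if (heldset[from]) return 1;
        seen[from] = 1;
        for (int next = 0; next < NLOCKS; next++)
            if (edge[from][next] && !seen[next]
                && reaches_held(next, heldset, seen)) return 1;
        return 0;
    }

    /* returns 1 if acquiring 'lk' while holding 'heldset' inverts an
       order seen before (this sketch then skips the graph update) */
    static int pre_acquire(int lk, const unsigned char* heldset) {
        unsigned char seen[NLOCKS]; memset(seen, 0, sizeof seen);
        if (heldset[lk]) return 0;                     /* recursive re-acquire */
        if (reaches_held(lk, heldset, seen)) return 1; /* order violation */
        for (int old = 0; old < NLOCKS; old++)
            if (heldset[old]) edge[old][lk] = 1;       /* record old -> lk */
        return 0;
    }
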
4113 Thread* thr; /* allocating thread */
4159 md->thr = map_threads_lookup( tid );
4315 md_new->thr = map_threads_lookup( tid );
4389 if (tnr) *tnr = mm->thr->errmsg_index;
5475 Thread* thr = map_threads_maybe_lookup( tid );
5476 tl_assert( thr ); /* I must be mapped */
5481 HG_(record_error_Misc)( thr, buf );
5510 Thread *thr = map_threads_maybe_lookup(tid);
5512 HG_(thread_enter_pthread_create)(thr);
5513 HG_(thread_enter_synchr)(thr);
5519 Thread *thr = map_threads_maybe_lookup(tid);
5521 HG_(thread_leave_pthread_create)(thr);
5522 HG_(thread_leave_synchr)(thr);
5769 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5771 Thread* thr;
5775 thr = libhb_get_Thr_hgthread( hbt );
5776 tl_assert(thr);
5777 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5786 ExeContext* for_libhb__get_EC ( Thr* hbt )
5788 Thread* thr;
5792 thr = libhb_get_Thr_hgthread( hbt );
5793 tl_assert(thr);
5794 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5803 Thr* hbthr_root;