Lines Matching refs:Thr — cross-reference hits for the symbol Thr (libhb's thread handle) in Valgrind's Helgrind source (hg_main.c); each entry is the source line number followed by the matching line.
170 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
174 static Thread* mk_Thread ( Thr* hbthr ) {
248 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
251 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
254 tl_assert(HG_(is_sane_Thread)(thr));
264 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
279 VG_(addToBag)( lk->heldBy, (UWord)thr );
288 /* assert: .. and that thread is 'thr'. */
289 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
291 VG_(addToBag)(lk->heldBy, (UWord)thr);
302 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
305 tl_assert(HG_(is_sane_Thread)(thr));
320 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
329 VG_(addToBag)(lk->heldBy, (UWord)thr);
333 VG_(addToBag)( lk->heldBy, (UWord)thr );
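
The fragments above (lines 248-333) show lock holders being tracked in lk->heldBy, a multiset ("bag") keyed by Thread*: the first write acquisition adds the thread as sole holder, a recursive write acquisition asserts the existing holder is that same thread and adds it again, and a read acquisition simply adds one more element. A minimal self-contained model of that counting behaviour (plain C; the Bag type and function names are hypothetical stand-ins for VG_(addToBag)/VG_(elemBag), not Valgrind's real API):

    #include <assert.h>

    /* Toy multiset of opaque thread pointers. */
    typedef struct { const void* key[8]; unsigned count[8]; int n; } Bag;

    static void bag_add(Bag* b, const void* k) {
        for (int i = 0; i < b->n; i++)
            if (b->key[i] == k) { b->count[i]++; return; }
        assert(b->n < 8);
        b->key[b->n] = k; b->count[b->n] = 1; b->n++;
    }

    static unsigned bag_elem(const Bag* b, const void* k) {
        for (int i = 0; i < b->n; i++)
            if (b->key[i] == k) return b->count[i];
        return 0;
    }

    /* Write acquisition: first holder, or recursive re-acquire by
       the same thread (cf. the assert at line 289). */
    static void acquire_writer(Bag* heldBy, const void* thr) {
        if (heldBy->n == 0) {
            bag_add(heldBy, thr);            /* lock was free */
        } else {
            assert(bag_elem(heldBy, thr) > 0);
            bag_add(heldBy, thr);            /* recursion: count++ */
        }
    }

    /* Read acquisition: any number of distinct readers may sit in
       the bag at once (lines 329/333). */
    static void acquire_reader(Bag* heldBy, const void* thr) {
        bag_add(heldBy, thr);
    }
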
339 /* Update 'lk' to reflect a release of it by 'thr'. This is done
343 static void lockN_release ( Lock* lk, Thread* thr )
347 tl_assert(HG_(is_sane_Thread)(thr));
352 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
353 /* thr must actually have been a holder of lk */
368 Thread* thr;
375 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
376 tl_assert(HG_(is_sane_Thread)(thr));
378 thr->locksetA, (UWord)lk ));
379 thr->locksetA
380 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
384 thr->locksetW, (UWord)lk ));
385 thr->locksetW
386 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
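
Lines 368-386 are the matching half of the bookkeeping: when a lock is released by (or torn away from) all of its holders, each holding thread must drop it from locksetA unconditionally and from locksetW only if it was write-held. A sketch under the same toy-model assumptions (WordSet and its operations are hypothetical stand-ins for HG_(elemWS)/HG_(delFromWS) over univ_lsets):

    #include <assert.h>

    /* Toy word-set of opaque lock pointers. */
    typedef struct { const void* w[16]; int n; } WordSet;

    static int ws_elem(const WordSet* s, const void* w) {
        for (int i = 0; i < s->n; i++) if (s->w[i] == w) return 1;
        return 0;
    }

    static void ws_del(WordSet* s, const void* w) {
        for (int i = 0; i < s->n; i++)
            if (s->w[i] == w) { s->w[i] = s->w[--s->n]; return; }
    }

    typedef struct { WordSet locksetA, locksetW; } ToyThread;

    /* Mirror of the per-holder work in the loop at 375-386. */
    static void drop_lock_from_holder(ToyThread* thr, const void* lk,
                                      int heldInWMode) {
        assert(ws_elem(&thr->locksetA, lk));  /* holder must list it */
        ws_del(&thr->locksetA, lk);
        if (heldInWMode) {
            assert(ws_elem(&thr->locksetW, lk));
            ws_del(&thr->locksetW, lk);
        }
    }
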
509 Thread* thr;
513 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
515 VG_(printf)("%lu:%p ", count, thr);
519 count, thr->errmsg_index);
520 if (thr->coretid == VG_INVALID_THREADID)
523 VG_(printf)("tid %u ", thr->coretid);
599 static void initialise_data_structures ( Thr* hbthr_root )
601 Thread* thr;
637 thr = mk_Thread(hbthr_root);
638 thr->coretid = 1; /* FIXME: hardwires an assumption about the
641 libhb_set_Thr_hgthread(hbthr_root, thr);
644 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
645 tl_assert(thr->coretid != VG_INVALID_THREADID);
647 map_threads[thr->coretid] = thr;
662 Thread* thr;
664 thr = map_threads[coretid];
665 return thr;
671 Thread* thr;
673 thr = map_threads[coretid];
674 tl_assert(thr);
675 return thr;
678 /* Do a reverse lookup. Does not assert if 'thr' is not found in
680 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
683 tl_assert(HG_(is_sane_Thread)(thr));
687 tid = thr->coretid;
692 /* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
694 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
696 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
705 Thread* thr;
708 thr = map_threads[coretid];
709 tl_assert(thr);
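
Lines 599-709 sketch the thread registry: map_threads is a flat array indexed by Valgrind core ThreadId, with the root thread hardwired to tid 1 at startup (line 638). The "reverse" lookup, despite its _SLOW suffix, appears from the fragment at line 687 to be O(1), since each Thread caches its coretid; an exited thread reports VG_INVALID_THREADID (lines 1728-1729). A condensed model (the array bound and names here are assumptions, not Valgrind's real values):

    #include <assert.h>
    #include <stddef.h>

    #define N_THREADS   256   /* assumed bound */
    #define INVALID_TID 0u    /* cf. the 0 (INVALID) lookup at 1715 */

    typedef unsigned ThreadId;
    typedef struct { ThreadId coretid; } ToyThread;

    static ToyThread* map_threads[N_THREADS];  /* coretid -> Thread* */

    /* Forward lookups: 'maybe' may return NULL (lines 664-665); the
       plain variant asserts the slot is populated (lines 673-675). */
    static ToyThread* maybe_lookup(ThreadId tid) { return map_threads[tid]; }
    static ToyThread* lookup(ThreadId tid) {
        ToyThread* thr = map_threads[tid];
        assert(thr != NULL);
        return thr;
    }

    /* Reverse lookup: read the cached coretid (line 687); a dead
       thread yields INVALID_TID. */
    static ThreadId maybe_reverse_lookup(ToyThread* thr) {
        assert(thr != NULL);
        return thr->coretid;
    }
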
713 static void HG_(thread_enter_synchr)(Thread *thr) {
714 tl_assert(thr->synchr_nesting >= 0);
716 thr->synchr_nesting += 1;
720 static void HG_(thread_leave_synchr)(Thread *thr) {
722 thr->synchr_nesting -= 1;
724 tl_assert(thr->synchr_nesting >= 0);
727 static void HG_(thread_enter_pthread_create)(Thread *thr) {
728 tl_assert(thr->pthread_create_nesting_level >= 0);
729 thr->pthread_create_nesting_level += 1;
732 static void HG_(thread_leave_pthread_create)(Thread *thr) {
733 tl_assert(thr->pthread_create_nesting_level > 0);
734 thr->pthread_create_nesting_level -= 1;
738 Thread *thr = map_threads_maybe_lookup(tid);
739 return thr->pthread_create_nesting_level;
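
Lines 713-739 maintain two per-thread nesting counters. synchr_nesting counts how deep the thread currently is inside an intercepted synchronisation call, so that memory accesses made by the pthread implementation itself can be skipped by the access handlers (the synchr_nesting == 0 guards that recur from line 1574 onwards); pthread_create_nesting_level plays the same role for pthread_create. A direct transliteration of the balanced enter/leave pairs:

    #include <assert.h>

    typedef struct {
        int synchr_nesting;                /* inside intercepted sync call */
        int pthread_create_nesting_level;  /* inside pthread_create */
    } ToyThread;

    static void enter_synchr(ToyThread* thr) {
        assert(thr->synchr_nesting >= 0);
        thr->synchr_nesting += 1;
    }
    static void leave_synchr(ToyThread* thr) {
        thr->synchr_nesting -= 1;
        assert(thr->synchr_nesting >= 0);  /* must stay balanced */
    }

    static void enter_pthread_create(ToyThread* thr) {
        assert(thr->pthread_create_nesting_level >= 0);
        thr->pthread_create_nesting_level += 1;
    }
    static void leave_pthread_create(ToyThread* thr) {
        assert(thr->pthread_create_nesting_level > 0);
        thr->pthread_create_nesting_level -= 1;
    }
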
817 // and the associated Segment has .thr == t
838 and seg->thr->lockset does not contain lk
853 seg->thr is a sane Thread
868 each thr in tset is a valid thread, which is non-dead
874 /* Return True iff 'thr' holds 'lk' in some mode. */
875 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
878 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
889 Thread* thr;
894 for (thr = admin_threads; thr; thr = thr->admin) {
895 if (!HG_(is_sane_Thread)(thr)) BAD("1");
896 wsA = thr->locksetA;
897 wsW = thr->locksetW;
907 if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
949 Thread* thr;
953 (UWord*)&thr, &count )) {
956 tl_assert(HG_(is_sane_Thread)(thr));
957 if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
961 && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
964 && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
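
Lines 874-964 belong to the consistency checker: thread locksets and lock holder-bags are two views of the same relation, and the checker walks both directions, bailing out through the BAD("...") labels on any disagreement. The invariant, reduced to one thread/lock pair (the predicate names are hypothetical abstractions of VG_(elemBag) and HG_(elemWS)):

    #include <assert.h>

    extern int holds(const void* thr, const void* lk);  /* in lk->heldBy */
    extern int held_in_w_mode(const void* lk);
    extern int in_locksetA(const void* thr, const void* lk);
    extern int in_locksetW(const void* thr, const void* lk);

    static void check_pair(const void* thr, const void* lk) {
        /* lockset membership implies holdership (BAD "3", line 907) */
        if (in_locksetA(thr, lk))
            assert(holds(thr, lk));
        /* holdership implies the right lockset memberships
           (lines 957-964) */
        if (holds(thr, lk)) {
            assert(in_locksetA(thr, lk));
            if (held_in_w_mode(lk)) assert(in_locksetW(thr, lk));
            else                    assert(!in_locksetW(thr, lk));
        }
    }
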
1011 static void shadow_mem_scopy_range ( Thread* thr,
1014 Thr* hbthr = thr->hbthr;
1019 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1021 Thr* hbthr = thr->hbthr;
1026 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1027 Thr* hbthr = thr->hbthr;
1032 static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1034 libhb_srange_new( thr->hbthr, a, len );
1037 static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
1042 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1045 static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
1050 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
1053 static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1057 libhb_srange_untrack( thr->hbthr, aIN, len );
1068 /* Create a new segment for 'thr', making it depend (.prev) on its
1070 return both of them. Also update 'thr' so it references the new
1075 //zz Thread* thr )
1080 //zz tl_assert(HG_(is_sane_Thread)(thr));
1081 //zz cur_seg = map_segments_lookup( thr->csegid );
1083 //zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1085 //zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1088 //zz thr->csegid = *new_segidP;
1095 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1111 tl_assert(HG_(is_sane_Thread)(thr));
1115 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1119 tl_assert(thr->hbthr);
1125 lockN_acquire_writer( lk, thr );
1127 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1136 thr, "Bug in libpthread: write lock "
1145 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1147 thr, "Bug in libpthread: write lock "
1153 /* So the lock is already held in w-mode by 'thr'. That means this
1160 thr, "Bug in libpthread: recursive write lock "
1167 lockN_acquire_writer( lk, thr );
1170 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1177 laog__pre_thread_acquires_lock( thr, lk );
1180 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1181 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
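
Lines 1095-1181 give the shape of the write-acquisition handler: on success the lock's holder bag is updated, a strong happens-before receive is performed on the lock's synchronisation object (so the acquirer inherits everything published by the previous releaser), the lock-order graph is consulted, and only then does the lock enter both locksets. An abstracted control-flow sketch (all extern names are hypothetical stand-ins for the calls visible above):

    #include <stdbool.h>

    extern bool lock_is_unheld(const void* lk);
    extern bool w_held_by(const void* lk, const void* thr);
    extern bool kind_allows_recursion(const void* lk);
    extern void acquire_writer(void* lk, void* thr);   /* line 1125 */
    extern void hb_recv(void* hbthr, void* lk_so, bool strong);
    extern void laog_check(void* thr, void* lk);       /* line 1177 */
    extern void add_to_locksets(void* thr, void* lk, bool alsoW);
    extern void complain_recursive_w_lock(void* thr);

    static void post_w_acquire(void* thr, void* hbthr,
                               void* lk, void* lk_so) {
        if (lock_is_unheld(lk)) {
            acquire_writer(lk, thr);
            hb_recv(hbthr, lk_so, true /*strong_recv*/);
        } else if (w_held_by(lk, thr)) {
            /* Recursive re-acquire by the same thread. */
            if (!kind_allows_recursion(lk))
                complain_recursive_w_lock(thr);   /* line 1160 */
            acquire_writer(lk, thr);
            hb_recv(hbthr, lk_so, true);
        } else {
            return;  /* held by another thread: error path, 1145-1147 */
        }
        laog_check(thr, lk);      /* order check BEFORE lockset add */
        add_to_locksets(thr, lk, true /*write-held: A and W*/);
    }

The read-acquisition twin at 1192-1256 differs in exactly the expected places: the receive is weak (False at 1226/1245), and only locksetA is extended, since a read-held lock deliberately stays out of locksetW (comment at 1256).
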
1192 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1208 tl_assert(HG_(is_sane_Thread)(thr));
1214 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1218 tl_assert(thr->hbthr);
1224 lockN_acquire_reader( lk, thr );
1226 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1234 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1242 lockN_acquire_reader( lk, thr );
1245 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1252 laog__pre_thread_acquires_lock( thr, lk );
1255 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1256 /* but don't update thr->locksetW, since lk is only rd-held */
1267 void evhH__pre_thread_releases_lock ( Thread* thr,
1283 tl_assert(HG_(is_sane_Thread)(thr));
1290 HG_(record_error_UnlockBogus)( thr, lock_ga );
1298 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1302 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1310 HG_(record_error_UnlockUnlocked)( thr, lock );
1311 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1312 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1322 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
1331 tl_assert(realOwner != thr);
1332 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1333 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1334 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1341 lockN_release( lock, thr );
1348 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1353 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1355 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1357 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1383 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1386 thr->locksetA
1387 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
1388 thr->locksetW
1389 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
1391 tl_assert(thr->hbthr);
1395 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
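
Lines 1267-1395 are the release handler, which is mostly error triage: a bogus address (1290), a kind mismatch (1298-1302), an unlock of an unheld lock (1310), or an unlock of a lock held by somebody else (1331-1334). Only when the calling thread really was a holder does the release proceed, and only a non-recursive (final) release removes the lock from the locksets and exports a happens-before edge through the lock's SO. A condensed sketch with hypothetical stand-in names:

    #include <stdbool.h>
    #include <stddef.h>

    extern unsigned held_count(const void* lk, const void* thr); /* elemBag */
    extern void error_unlock_unlocked(void* thr, void* lk);      /* 1310 */
    extern void error_unlock_foreign(void* thr, void* owner, void* lk);
    extern void release_one(void* lk, void* thr);  /* lockN_release, 1341 */
    extern void del_from_locksets(void* thr, void* lk);
    extern void hb_send(void* hbthr, void* lk_so, bool was_heldW);

    static void pre_release(void* thr, void* hbthr, void* lk,
                            void* lk_so, void* realOwner, bool was_heldW) {
        unsigned n = held_count(lk, thr);
        if (n == 0) {
            if (realOwner == NULL) error_unlock_unlocked(thr, lk);
            else                   error_unlock_foreign(thr, realOwner, lk);
            return;               /* no lockset or HB changes */
        }
        release_one(lk, thr);
        if (n > 1)
            return;               /* recursive: lock is still held */
        del_from_locksets(thr, lk);        /* lines 1386-1389 */
        hb_send(hbthr, lk_so, was_heldW);  /* line 1395: publish to
                                              the next acquirer */
    }
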
1448 Thread* thr;
1449 thr = get_current_Thread_in_C_C();
1450 if (LIKELY(thr))
1451 return thr;
1460 thr = map_threads_lookup( coretid );
1461 return thr;
1466 Thread *thr = get_current_Thread();
1469 shadow_mem_make_New( thr, a, len );
1472 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1473 shadow_mem_make_Untracked( thr, a, len );
1478 Thread *thr = get_current_Thread();
1481 shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
1484 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1485 shadow_mem_make_Untracked( thr, a, len );
1490 Thread *thr = get_current_Thread();
1493 shadow_mem_make_New( thr, a, len );
1496 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1497 shadow_mem_make_Untracked( thr, a, len );
1503 Thread *thr = get_current_Thread();
1508 shadow_mem_make_New( thr, a, len );
1509 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1510 shadow_mem_make_Untracked( thr, a, len );
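
The four new-memory handlers at 1466-1510 share one shape: mark the fresh range as New in the shadow memory and then, if the current thread is inside pthread_create, immediately mark it Untracked, so that stack and TLS set up on behalf of a child thread does not later show up as racy. The common pattern, with hypothetical helper names standing in for get_current_Thread and the shadow_mem_make_* calls above:

    extern void* current_thread(void);
    extern int   create_nesting(const void* thr);
    extern void  mark_new(void* thr, unsigned long a, unsigned long len);
    extern void  mark_untracked(void* thr, unsigned long a,
                                unsigned long len);

    static void on_new_mem(unsigned long a, unsigned long len) {
        void* thr = current_thread();
        mark_new(thr, a, len);
        if (create_nesting(thr) > 0)      /* set up on behalf of a */
            mark_untracked(thr, a, len);  /* child being created   */
    }
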
1573 Thread *thr = get_current_Thread();
1574 if (LIKELY(thr->synchr_nesting == 0))
1575 shadow_mem_scopy_range( thr , src, dst, len );
1590 Thr* hbthr_p;
1591 Thr* hbthr_c;
1658 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1713 Thread* thr;
1715 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1716 tl_assert(!thr);
1721 thr = map_threads_maybe_lookup(i);
1722 if (!thr)
1726 tl_assert(thr->hbthr);
1727 libhb_async_exit(thr->hbthr);
1728 tl_assert(thr->coretid == i);
1729 thr->coretid = VG_INVALID_THREADID;
1736 void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
1770 Thr* hbthr_s;
1771 Thr* hbthr_q;
1816 Thread *thr = map_threads_lookup(tid);
1817 if (LIKELY(thr->synchr_nesting == 0))
1818 shadow_mem_cread_range(thr, a, size);
1836 Thread *thr = map_threads_lookup(tid);
1838 if (LIKELY(thr->synchr_nesting == 0))
1839 shadow_mem_cread_range( thr, a, len+1 );
1850 Thread *thr = map_threads_lookup(tid);
1851 if (LIKELY(thr->synchr_nesting == 0))
1852 shadow_mem_cwrite_range(thr, a, size);
1870 Thread* thr;
1873 thr = get_current_Thread();
1874 tl_assert(thr);
1881 if (LIKELY(thr->synchr_nesting == 0))
1882 shadow_mem_cwrite_range(thr, a, len);
1884 shadow_mem_make_NoAccess_AHAE( thr
1886 shadow_mem_make_NoAccess_NoFX( thr, a, len );
1914 Thread* thr = get_current_Thread_in_C_C();
1915 Thr* hbthr = thr->hbthr;
1916 if (LIKELY(thr->synchr_nesting == 0))
1922 Thread* thr = get_current_Thread_in_C_C();
1923 Thr* hbthr = thr->hbthr;
1924 if (LIKELY(thr->synchr_nesting == 0))
1930 Thread* thr = get_current_Thread_in_C_C();
1931 Thr* hbthr = thr->hbthr;
1932 if (LIKELY(thr->synchr_nesting == 0))
1938 Thread* thr = get_current_Thread_in_C_C();
1939 Thr* hbthr = thr->hbthr;
1940 if (LIKELY(thr->synchr_nesting == 0))
1946 Thread* thr = get_current_Thread_in_C_C();
1947 Thr* hbthr = thr->hbthr;
1948 if (LIKELY(thr->synchr_nesting == 0))
1954 Thread* thr = get_current_Thread_in_C_C();
1955 Thr* hbthr = thr->hbthr;
1956 if (LIKELY(thr->synchr_nesting == 0))
1962 Thread* thr = get_current_Thread_in_C_C();
1963 Thr* hbthr = thr->hbthr;
1964 if (LIKELY(thr->synchr_nesting == 0))
1970 Thread* thr = get_current_Thread_in_C_C();
1971 Thr* hbthr = thr->hbthr;
1972 if (LIKELY(thr->synchr_nesting == 0))
1978 Thread* thr = get_current_Thread_in_C_C();
1979 Thr* hbthr = thr->hbthr;
1980 if (LIKELY(thr->synchr_nesting == 0))
1986 Thread* thr = get_current_Thread_in_C_C();
1987 Thr* hbthr = thr->hbthr;
1988 if (LIKELY(thr->synchr_nesting == 0))
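
The run of near-identical fragments from 1914 to 1988 is the family of sized read/write access handlers: each fetches the cached current Thread and forwards to libhb only when synchr_nesting is zero, i.e. when the access comes from client code rather than from inside an intercepted synchronisation call. The shared pattern (helper names are hypothetical):

    extern void* current_thread(void);   /* get_current_Thread_in_C_C */
    extern void* hbthr_of(const void* thr);
    extern int   synchr_nesting(const void* thr);
    extern void  hb_read(void* hbthr, unsigned long a, unsigned long sz);

    static void on_read(unsigned long a, unsigned long sz) {
        void* thr = current_thread();
        if (synchr_nesting(thr) == 0)    /* LIKELY: real client access */
            hb_read(hbthr_of(thr), a, sz);
        /* else: access made by the pthread implementation; skip */
    }
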
2019 Thread* thr;
2026 thr = map_threads_maybe_lookup( tid );
2028 tl_assert( HG_(is_sane_Thread)(thr) );
2042 thr, "pthread_mutex_destroy with invalid argument" );
2051 thr, "pthread_mutex_destroy of a locked mutex" );
2078 Thread* thr;
2085 thr = map_threads_maybe_lookup( tid );
2086 tl_assert(thr); /* cannot fail - Thread* must already exist */
2091 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2100 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2109 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2111 HG_(record_error_Misc)( thr, errstr );
2119 Thread* thr;
2124 thr = map_threads_maybe_lookup( tid );
2125 tl_assert(thr); /* cannot fail - Thread* must already exist */
2128 thr,
2137 Thread* thr;
2142 thr = map_threads_maybe_lookup( tid );
2143 tl_assert(thr); /* cannot fail - Thread* must already exist */
2145 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2151 Thread* thr;
2155 thr = map_threads_maybe_lookup( tid );
2156 tl_assert(thr); /* cannot fail - Thread* must already exist */
2172 Thread* thr;
2183 thr = map_threads_maybe_lookup( tid );
2185 tl_assert( HG_(is_sane_Thread)(thr) );
2192 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2303 Thread* thr;
2306 thr = map_threads_maybe_lookup( tid );
2307 tl_assert(thr); /* cannot fail - Thread* must already exist */
2317 thr, "pthread_cond_destroy:"
2335 thr, "pthread_cond_destroy: destruction of unknown cond var");
2348 Thread* thr;
2356 thr = map_threads_maybe_lookup( tid );
2357 tl_assert(thr); /* cannot fail - Thread* must already exist */
2391 HG_(record_error_Misc)(thr,
2395 HG_(record_error_Misc)(thr,
2399 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2400 HG_(record_error_Misc)(thr,
2410 //HG_(record_error_Misc)( thr,
2416 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2424 Thread* thr;
2434 thr = map_threads_maybe_lookup( tid );
2435 tl_assert(thr); /* cannot fail - Thread* must already exist */
2445 thr,
2452 thr, "pthread_cond_{timed}wait called with mutex "
2458 thr, "pthread_cond_{timed}wait called with un-held mutex");
2461 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2464 thr, "pthread_cond_{timed}wait called with mutex "
2480 thr, "pthread_cond_{timed}wait: cond is associated "
2495 Thread* thr;
2503 thr = map_threads_maybe_lookup( tid );
2504 tl_assert(thr); /* cannot fail - Thread* must already exist */
2514 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2527 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2533 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2588 Thread* thr;
2594 thr = map_threads_maybe_lookup( tid );
2596 tl_assert( HG_(is_sane_Thread)(thr) );
2602 thr, "pthread_rwlock_destroy with invalid argument" );
2611 thr, "pthread_rwlock_destroy of a locked mutex" );
2639 Thread* thr;
2647 thr = map_threads_maybe_lookup( tid );
2648 tl_assert(thr); /* cannot fail - Thread* must already exist */
2655 thr, "pthread_rwlock_{rd,rw}lock with a "
2664 Thread* thr;
2670 thr = map_threads_maybe_lookup( tid );
2671 tl_assert(thr); /* cannot fail - Thread* must already exist */
2675 thr,
2684 Thread* thr;
2689 thr = map_threads_maybe_lookup( tid );
2690 tl_assert(thr); /* cannot fail - Thread* must already exist */
2692 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2698 Thread* thr;
2702 thr = map_threads_maybe_lookup( tid );
2703 tl_assert(thr); /* cannot fail - Thread* must already exist */
2829 Thread* thr;
2835 thr = map_threads_maybe_lookup( tid );
2836 tl_assert(thr); /* cannot fail - Thread* must already exist */
2850 thr, "sem_init: initial value exceeds 10000; using 10000" );
2857 Thr* hbthr = thr->hbthr;
2876 Thread* thr;
2878 Thr* hbthr;
2884 thr = map_threads_maybe_lookup( tid );
2885 tl_assert(thr); /* cannot fail - Thread* must already exist */
2889 hbthr = thr->hbthr;
2904 Thread* thr;
2906 Thr* hbthr;
2912 thr = map_threads_maybe_lookup( tid );
2913 tl_assert(thr); /* cannot fail - Thread* must already exist */
2920 hbthr = thr->hbthr;
2930 thr, "Bug in libpthread: sem_wait succeeded on"
3004 Thread* thr;
3012 thr = map_threads_maybe_lookup( tid );
3013 tl_assert(thr); /* cannot fail - Thread* must already exist */
3017 thr, "pthread_barrier_init: 'count' argument is zero"
3023 thr, "pthread_barrier_init: invalid 'resizable' argument"
3032 thr, "pthread_barrier_init: barrier is already initialised"
3039 thr, "pthread_barrier_init: threads are waiting at barrier"
3058 Thread* thr;
3069 thr = map_threads_maybe_lookup( tid );
3070 tl_assert(thr); /* cannot fail - Thread* must already exist */
3077 thr, "pthread_barrier_destroy: barrier was never initialised"
3083 thr, "pthread_barrier_destroy: threads are waiting at barrier"
3117 Thr* hbthr = t->hbthr;
3123 Thr* hbthr = t->hbthr;
3179 Thread* thr;
3188 thr = map_threads_maybe_lookup( tid );
3189 tl_assert(thr); /* cannot fail - Thread* must already exist */
3196 thr, "pthread_barrier_wait: barrier is uninitialised"
3205 VG_(addToXA)( bar->waiting, &thr );
3222 Thread* thr;
3231 thr = map_threads_maybe_lookup( tid );
3232 tl_assert(thr); /* cannot fail - Thread* must already exist */
3239 thr, "pthread_barrier_resize: barrier is uninitialised"
3246              thr, "pthread_barrier_resize: barrier may not be resized"
3253 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3342 Thread* thr;
3349 thr = map_threads_maybe_lookup( tid );
3350 tl_assert(thr); /* cannot fail - Thread* must already exist */
3355 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3367 Thread* thr;
3374 thr = map_threads_maybe_lookup( tid );
3375 tl_assert(thr); /* cannot fail - Thread* must already exist */
3383 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3415 Thread *thr = map_threads_maybe_lookup(tid);
3416 tl_assert(thr != NULL);
3419 if ((bindflag & thr->bind_guard_flag) == 0) {
3420 thr->bind_guard_flag |= bindflag;
3421 HG_(thread_enter_synchr)(thr);
3423 HG_(thread_enter_pthread_create)(thr);
3435 Thread *thr = map_threads_maybe_lookup(tid);
3436 tl_assert(thr != NULL);
3439 if ((thr->bind_guard_flag & bindflag) != 0) {
3440 thr->bind_guard_flag &= ~bindflag;
3441 HG_(thread_leave_synchr)(thr);
3442 HG_(thread_leave_pthread_create)(thr);
3884 /* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3885 between 'lk' and the locks already held by 'thr' and issue a
3890 Thread* thr, /* NB: BEFORE lock is added */
3898 /* It may be that 'thr' already holds 'lk' and is recursively
3901 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3905 to any of the locks already held by thr, since if any such path
3910 other = laog__do_dfs_from_to(lk, thr->locksetA);
3933 thr, lk, other,
3983 thr, lk, other,
3989 (old, lk) | old <- locks already held by thr
3990 Since both old and lk are currently held by thr, their acquired_at
3994 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
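
Lines 3884-3994 are the lock-order (laog) check: before thr acquires lk, a DFS from lk over the "acquired-before" graph looks for any lock the thread already holds. Reaching one means both orders, lk before other and other before lk, would be on record, i.e. a potential deadlock cycle, triggering the report at 3933; recursive re-acquisition of an already-held lock is exempt (line 3901). A self-contained toy version with locks as small integers (graph layout and names are assumptions, not the real laog representation):

    #include <string.h>

    #define MAXN 16
    static int nsucc[MAXN];        /* out-degree per lock          */
    static int succ[MAXN][MAXN];   /* "acquired before" successors */

    /* DFS from 'lk': return a reachable lock that is in 'held',
       or -1 if none is reachable. */
    static int dfs_reaches_held(int lk, const int* held, int nHeld,
                                int* seen) {
        for (int i = 0; i < nHeld; i++)
            if (held[i] == lk) return lk;
        seen[lk] = 1;
        for (int e = 0; e < nsucc[lk]; e++) {
            int nxt = succ[lk][e];
            if (!seen[nxt]) {
                int hit = dfs_reaches_held(nxt, held, nHeld, seen);
                if (hit >= 0) return hit;
            }
        }
        return -1;
    }

    /* Pre-acquisition check: -1 means OK, otherwise the returned
       lock closes an ordering cycle with 'lk'. */
    static int pre_acquire_check(int lk, const int* held, int nHeld) {
        int seen[MAXN];
        memset(seen, 0, sizeof seen);
        for (int i = 0; i < nHeld; i++)
            if (held[i] == lk) return -1;   /* recursive: exempt */
        return dfs_reaches_held(lk, held, nHeld, seen);
    }
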
4111 Thread* thr; /* allocating thread */
4157 md->thr = map_threads_lookup( tid );
4307 md_new->thr = map_threads_lookup( tid );
4381 if (tnr) *tnr = mm->thr->errmsg_index;
5415 Thread* thr = map_threads_maybe_lookup( tid );
5416 tl_assert( thr ); /* I must be mapped */
5421 HG_(record_error_Misc)( thr, buf );
5450 Thread *thr = map_threads_maybe_lookup(tid);
5452 HG_(thread_enter_pthread_create)(thr);
5453 HG_(thread_enter_synchr)(thr);
5459 Thread *thr = map_threads_maybe_lookup(tid);
5461 HG_(thread_leave_pthread_create)(thr);
5462 HG_(thread_leave_synchr)(thr);
5708 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5710 Thread* thr;
5714 thr = libhb_get_Thr_hgthread( hbt );
5715 tl_assert(thr);
5716 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5725 ExeContext* for_libhb__get_EC ( Thr* hbt )
5727 Thread* thr;
5731 thr = libhb_get_Thr_hgthread( hbt );
5732 tl_assert(thr);
5733 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5742 Thr* hbthr_root;