      1 /*
      2   This file is part of drd, a thread error detector.
      3 
      4   Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.
      5 
      6   This program is free software; you can redistribute it and/or
      7   modify it under the terms of the GNU General Public License as
      8   published by the Free Software Foundation; either version 2 of the
      9   License, or (at your option) any later version.
     10 
     11   This program is distributed in the hope that it will be useful, but
     12   WITHOUT ANY WARRANTY; without even the implied warranty of
     13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14   General Public License for more details.
     15 
     16   You should have received a copy of the GNU General Public License
     17   along with this program; if not, write to the Free Software
     18   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     19   02111-1307, USA.
     20 
     21   The GNU General Public License is contained in the file COPYING.
     22 */
     23 
     24 
     25 #include "drd_error.h"
     26 #include "drd_barrier.h"
     27 #include "drd_clientobj.h"
     28 #include "drd_cond.h"
     29 #include "drd_mutex.h"
     30 #include "drd_segment.h"
     31 #include "drd_semaphore.h"
     32 #include "drd_suppression.h"
     33 #include "drd_thread.h"
     34 #include "pub_tool_vki.h"
     35 #include "pub_tool_basics.h"      // Addr, SizeT
     36 #include "pub_tool_libcassert.h"  // tl_assert()
     37 #include "pub_tool_libcbase.h"    // VG_(strlen)()
     38 #include "pub_tool_libcprint.h"   // VG_(printf)()
     39 #include "pub_tool_machine.h"
     40 #include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
     41 #include "pub_tool_options.h"     // VG_(clo_backtrace_size)
     42 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
     43 
     44 
     45 
     46 /* Local functions. */
     47 
     48 static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
     49 static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
     50 static void thread_compute_conflict_set(struct bitmap** conflict_set,
     51                                         const DrdThreadId tid);
     52 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
     53 
     54 
     55 /* Local variables. */
     56 
     57 static ULong    s_context_switch_count;
     58 static ULong    s_discard_ordered_segments_count;
     59 static ULong    s_compute_conflict_set_count;
     60 static ULong    s_update_conflict_set_count;
     61 static ULong    s_update_conflict_set_new_sg_count;
     62 static ULong    s_update_conflict_set_sync_count;
     63 static ULong    s_update_conflict_set_join_count;
     64 static ULong    s_conflict_set_bitmap_creation_count;
     65 static ULong    s_conflict_set_bitmap2_creation_count;
     66 static ThreadId s_vg_running_tid  = VG_INVALID_THREADID;
     67 DrdThreadId     DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
     68 ThreadInfo*     DRD_(g_threadinfo);
     69 struct bitmap*  DRD_(g_conflict_set);
     70 Bool DRD_(verify_conflict_set);
     71 static Bool     s_trace_context_switches = False;
     72 static Bool     s_trace_conflict_set = False;
     73 static Bool     s_trace_conflict_set_bm = False;
     74 static Bool     s_trace_fork_join = False;
     75 static Bool     s_segment_merging = True;
     76 static int      s_new_segments_since_last_merge;
     77 static int      s_segment_merge_interval = 10;
     78 static unsigned s_join_list_vol = 10;
     79 static unsigned s_deletion_head;
     80 static unsigned s_deletion_tail;
     81 
     82 
     83 /* Function definitions. */
     84 
     85 /** Enables/disables context switch tracing. */
     86 void DRD_(thread_trace_context_switches)(const Bool t)
     87 {
     88    tl_assert(t == False || t == True);
     89    s_trace_context_switches = t;
     90 }
     91 
     92 /** Enables/disables conflict set tracing. */
     93 void DRD_(thread_trace_conflict_set)(const Bool t)
     94 {
     95    tl_assert(t == False || t == True);
     96    s_trace_conflict_set = t;
     97 }
     98 
     99 /** Enables/disables conflict set bitmap tracing. */
    100 void DRD_(thread_trace_conflict_set_bm)(const Bool t)
    101 {
    102    tl_assert(t == False || t == True);
    103    s_trace_conflict_set_bm = t;
    104 }
    105 
    106 /** Report whether fork/join tracing is enabled. */
    107 Bool DRD_(thread_get_trace_fork_join)(void)
    108 {
    109    return s_trace_fork_join;
    110 }
    111 
    112 /** Enables/disables fork/join tracing. */
    113 void DRD_(thread_set_trace_fork_join)(const Bool t)
    114 {
    115    tl_assert(t == False || t == True);
    116    s_trace_fork_join = t;
    117 }
    118 
    119 /** Enables/disables segment merging. */
    120 void DRD_(thread_set_segment_merging)(const Bool m)
    121 {
    122    tl_assert(m == False || m == True);
    123    s_segment_merging = m;
    124 }
    125 
    126 /** Get the segment merging interval. */
    127 int DRD_(thread_get_segment_merge_interval)(void)
    128 {
    129    return s_segment_merge_interval;
    130 }
    131 
    132 /** Set the segment merging interval. */
    133 void DRD_(thread_set_segment_merge_interval)(const int i)
    134 {
    135    s_segment_merge_interval = i;
    136 }
    137 
    138 void DRD_(thread_set_join_list_vol)(const int jlv)
    139 {
    140    s_join_list_vol = jlv;
    141 }
    142 
    143 void DRD_(thread_init)(void)
    144 {
    145    DRD_(g_threadinfo) = VG_(malloc)("drd.main.ti.1",
    146                                 DRD_N_THREADS * sizeof DRD_(g_threadinfo)[0]);
    147    for (UInt i = 0; i < DRD_N_THREADS; ++i) {
    148       static ThreadInfo initval;
    149       DRD_(g_threadinfo)[i] = initval;
    150    }
    151 }
    152 
    153 /**
    154  * Convert Valgrind's ThreadId into a DrdThreadId.
    155  *
    156  * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
    157  *         Valgrind ThreadId does not yet exist.
    158  */
    159 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
    160 {
    161    UInt i;
    162 
    163    if (tid == VG_INVALID_THREADID)
    164       return DRD_INVALID_THREADID;
    165 
    166    for (i = 1; i < DRD_N_THREADS; i++)
    167    {
    168       if (DRD_(g_threadinfo)[i].vg_thread_exists == True
    169           && DRD_(g_threadinfo)[i].vg_threadid == tid)
    170       {
    171          return i;
    172       }
    173    }
    174 
    175    return DRD_INVALID_THREADID;
    176 }
    177 
    178 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
    179 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
    180 {
    181    UInt i;
    182 
    183    tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
    184 
    185    for (i = 1; i < DRD_N_THREADS; i++)
    186    {
    187       if (!DRD_(g_threadinfo)[i].valid)
    188       {
    189          tl_assert(! DRD_(IsValidDrdThreadId)(i));
    190 
    191          DRD_(g_threadinfo)[i].valid         = True;
    192          DRD_(g_threadinfo)[i].vg_thread_exists = True;
    193          DRD_(g_threadinfo)[i].vg_threadid   = tid;
    194          DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
    195          DRD_(g_threadinfo)[i].stack_min     = 0;
    196          DRD_(g_threadinfo)[i].stack_min_min = 0;
    197          DRD_(g_threadinfo)[i].stack_startup = 0;
    198          DRD_(g_threadinfo)[i].stack_max     = 0;
    199          DRD_(thread_set_name)(i, "");
    200          DRD_(g_threadinfo)[i].on_alt_stack        = False;
    201          DRD_(g_threadinfo)[i].is_recording_loads  = True;
    202          DRD_(g_threadinfo)[i].is_recording_stores = True;
    203          DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
    204          DRD_(g_threadinfo)[i].synchr_nesting = 0;
    205          DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
    206          tl_assert(DRD_(g_threadinfo)[i].sg_first == NULL);
    207          tl_assert(DRD_(g_threadinfo)[i].sg_last == NULL);
    208 
    209          tl_assert(DRD_(IsValidDrdThreadId)(i));
    210 
    211          return i;
    212       }
    213    }
    214 
    215    VG_(printf)(
    216 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
    217 "Aborting.\n");
    218 
    219    tl_assert(False);
    220 
    221    return DRD_INVALID_THREADID;
    222 }
    223 
    224 /** Convert a POSIX thread ID into a DRD thread ID. */
    225 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
    226 {
    227    UInt i;
    228 
    229    if (tid != INVALID_POSIX_THREADID)
    230    {
    231       for (i = 1; i < DRD_N_THREADS; i++)
    232       {
    233          if (DRD_(g_threadinfo)[i].posix_thread_exists
    234              && DRD_(g_threadinfo)[i].pt_threadid == tid)
    235          {
    236             return i;
    237          }
    238       }
    239    }
    240    return DRD_INVALID_THREADID;
    241 }
    242 
    243 /** Convert a DRD thread ID into a Valgrind thread ID. */
    244 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
    245 {
    246    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    247              && tid != DRD_INVALID_THREADID);
    248 
    249    return (DRD_(g_threadinfo)[tid].vg_thread_exists
    250            ? DRD_(g_threadinfo)[tid].vg_threadid
    251            : VG_INVALID_THREADID);
    252 }
    253 
    254 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    255 /**
    256  * Sanity check of the doubly linked list of segments referenced by a
    257  * ThreadInfo struct.
    258  * @return True if sane, False if not.
    259  */
    260 static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
    261 {
    262    Segment* p;
    263 
    264    for (p = ti->sg_first; p; p = p->thr_next) {
    265       if (p->thr_next && p->thr_next->thr_prev != p)
    266          return False;
    267       if (p->thr_next == 0 && p != ti->sg_last)
    268          return False;
    269    }
    270    for (p = ti->sg_last; p; p = p->thr_prev) {
    271       if (p->thr_prev && p->thr_prev->thr_next != p)
    272          return False;
    273       if (p->thr_prev == 0 && p != ti->sg_first)
    274          return False;
    275    }
    276    return True;
    277 }
    278 #endif
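
/*
 * Illustrative sketch (added for clarity; the names thr_prev/thr_next,
 * sg_first and sg_last are the ones used throughout this file) of the
 * per-thread segment list that DRD_(sane_ThreadInfo)() checks and that
 * thread_append_segment()/thread_discard_segment() below maintain:
 *
 *   ti->sg_first                                    ti->sg_last
 *        |                                               |
 *        v                                               v
 *      [ seg ] <--thr_prev--- [ seg ] <--thr_prev--- [ seg ]
 *              ---thr_next-->         ---thr_next-->
 *
 * Walking thr_next from sg_first visits the same segments as walking
 * thr_prev from sg_last, in reverse order; new segments are appended at
 * the sg_last end.
 */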
    279 
    280 /**
    281  * Create the first segment for a newly started thread.
    282  *
    283  * This function is called from the handler installed via
    284  * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
    285  * from the context of the creator thread, before the new thread has been
    286  * created.
    287  *
    288  * @param[in] creator    DRD thread ID of the creator thread.
    289  * @param[in] vg_created Valgrind thread ID of the created thread.
    290  *
    291  * @return DRD thread ID of the created thread.
    292  */
    293 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
    294                                     const ThreadId vg_created)
    295 {
    296    DrdThreadId created;
    297 
    298    tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
    299    created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
    300    tl_assert(0 <= (int)created && created < DRD_N_THREADS
    301              && created != DRD_INVALID_THREADID);
    302 
    303    tl_assert(DRD_(g_threadinfo)[created].sg_first == NULL);
    304    tl_assert(DRD_(g_threadinfo)[created].sg_last == NULL);
    305    /* Create an initial segment for the newly created thread. */
    306    thread_append_segment(created, DRD_(sg_new)(creator, created));
    307 
    308    return created;
    309 }
    310 
    311 /**
    312  * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
    313  * after the thread has been created and before any client instructions are run
    314  * on the newly created thread, e.g. from the handler installed via
    315  * VG_(track_pre_thread_first_insn)().
    316  *
    317  * @param[in] vg_created Valgrind thread ID of the newly created thread.
    318  *
    319  * @return DRD thread ID for the new thread.
    320  */
    321 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
    322 {
    323    const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
    324 
    325    tl_assert(0 <= (int)created && created < DRD_N_THREADS
    326              && created != DRD_INVALID_THREADID);
    327 
    328    DRD_(g_threadinfo)[created].stack_max
    329       = VG_(thread_get_stack_max)(vg_created);
    330    DRD_(g_threadinfo)[created].stack_startup
    331       = DRD_(g_threadinfo)[created].stack_max;
    332    DRD_(g_threadinfo)[created].stack_min
    333       = DRD_(g_threadinfo)[created].stack_max;
    334    DRD_(g_threadinfo)[created].stack_min_min
    335       = DRD_(g_threadinfo)[created].stack_max;
    336    DRD_(g_threadinfo)[created].stack_size
    337       = VG_(thread_get_stack_size)(vg_created);
    338    tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
    339 
    340    return created;
    341 }
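
/*
 * A sketch, based only on the comments above (the actual handler
 * registration lives elsewhere in DRD), of when the two functions above run:
 *
 *   1. The creator thread calls pthread_create().
 *   2. The core invokes the VG_(track_pre_thread_ll_create)() handler in the
 *      creator's context -> DRD_(thread_pre_create)() allocates a
 *      DrdThreadId and the initial segment for the future thread.
 *   3. The new thread starts; before its first client instruction the
 *      VG_(track_pre_thread_first_insn)() handler runs
 *      -> DRD_(thread_post_create)() fills in the stack boundaries.
 */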
    342 
    343 static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
    344 {
    345    UInt j;
    346 
    347    DRD_(g_threadinfo)[tid].vg_thread_exists = False;
    348    DRD_(g_threadinfo)[tid].posix_thread_exists = False;
    349    DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
    350 #if 0
    351    VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
    352 #endif
    353    if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
    354       for (j = 0; j < DRD_N_THREADS; ++j) {
    355          if (DRD_(IsValidDrdThreadId)(j)
    356              && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
    357          {
    358             s_deletion_tail++;
    359 #if 0
    360             VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
    361 #endif
    362             DRD_(thread_delete)(j, False);
    363             break;
    364          }
    365       }
    366    }
    367 }
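
/*
 * Worked example (illustrative, with the default s_join_list_vol == 10) of
 * the delayed-deletion window implemented above: every joined thread gets a
 * deletion_seq equal to the current s_deletion_head, after which
 * s_deletion_head is incremented. Once s_deletion_head - s_deletion_tail
 * reaches 10, the oldest entry -- the thread whose deletion_seq equals
 * s_deletion_tail -- is actually deleted and s_deletion_tail is advanced.
 * So after joining threads t1..t10 in that order, t1 has been deleted while
 * t2..t10 are still kept around together with their segment information.
 */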
    368 
    369 /**
    370  * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
    371  * after thread drd_joiner joined thread drd_joinee.
    372  */
    373 void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
    374 {
    375    tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
    376    tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
    377 
    378    DRD_(thread_new_segment)(drd_joiner);
    379    DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
    380    DRD_(thread_new_segment)(drd_joinee);
    381 
    382    if (s_trace_fork_join)
    383    {
    384       const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
    385       const unsigned msg_size = 256;
    386       HChar* msg;
    387 
    388       msg = VG_(malloc)("drd.main.dptj.1", msg_size);
    389 
    390       VG_(snprintf)(msg, msg_size,
    391                     "drd_post_thread_join joiner = %d, joinee = %d",
    392                     drd_joiner, drd_joinee);
    393       if (joiner)
    394       {
    395          HChar* vc;
    396 
    397          vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
    398          VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
    399                        ", new vc: %s", vc);
    400          VG_(free)(vc);
    401       }
    402       DRD_(trace_msg)("%pS", msg);
    403       VG_(free)(msg);
    404    }
    405 
    406    if (!  DRD_(get_check_stack_accesses)())
    407    {
    408       DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
    409                                - DRD_(thread_get_stack_size)(drd_joinee),
    410                                DRD_(thread_get_stack_max)(drd_joinee));
    411    }
    412    DRD_(clientobj_delete_thread)(drd_joinee);
    413    DRD_(thread_delayed_delete)(drd_joinee);
    414 }
    415 
    416 /**
    417  * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
    418  * and accesses this data structure from multiple threads without locking.
    419  * Any conflicting accesses in the range stack_startup..stack_max will be
    420  * ignored.
    421  */
    422 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
    423                                     const Addr stack_startup)
    424 {
    425    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    426              && tid != DRD_INVALID_THREADID);
    427    tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
    428    tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
    429    DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
    430 }
    431 
    432 /** Return the stack pointer for the specified thread. */
    433 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
    434 {
    435    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    436              && tid != DRD_INVALID_THREADID);
    437    return DRD_(g_threadinfo)[tid].stack_min;
    438 }
    439 
    440 /**
    441  * Return the lowest value that was ever assigned to the stack pointer
    442  * for the specified thread.
    443  */
    444 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
    445 {
    446    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    447              && tid != DRD_INVALID_THREADID);
    448    return DRD_(g_threadinfo)[tid].stack_min_min;
    449 }
    450 
    451 /** Return the top address for the stack of the specified thread. */
    452 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
    453 {
    454    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    455              && tid != DRD_INVALID_THREADID);
    456    return DRD_(g_threadinfo)[tid].stack_max;
    457 }
    458 
    459 /** Return the maximum stack size for the specified thread. */
    460 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
    461 {
    462    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    463              && tid != DRD_INVALID_THREADID);
    464    return DRD_(g_threadinfo)[tid].stack_size;
    465 }
    466 
    467 Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
    468 {
    469    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    470              && tid != DRD_INVALID_THREADID);
    471    return DRD_(g_threadinfo)[tid].on_alt_stack;
    472 }
    473 
    474 void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
    475                                    const Bool on_alt_stack)
    476 {
    477    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    478              && tid != DRD_INVALID_THREADID);
    479    tl_assert(on_alt_stack == !!on_alt_stack);
    480    DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
    481 }
    482 
    483 Int DRD_(thread_get_threads_on_alt_stack)(void)
    484 {
    485    int n = 0;
    486 
    487    for (UInt i = 1; i < DRD_N_THREADS; i++)
    488       n += DRD_(g_threadinfo)[i].on_alt_stack;
    489    return n;
    490 }
    491 
    492 /**
    493  * Clean up thread-specific data structures.
    494  */
    495 void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
    496 {
    497    Segment* sg;
    498    Segment* sg_prev;
    499 
    500    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    501 
    502    tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
    503    for (sg = DRD_(g_threadinfo)[tid].sg_last; sg; sg = sg_prev) {
    504       sg_prev = sg->thr_prev;
    505       sg->thr_next = NULL;
    506       sg->thr_prev = NULL;
    507       DRD_(sg_put)(sg);
    508    }
    509    DRD_(g_threadinfo)[tid].valid = False;
    510    DRD_(g_threadinfo)[tid].vg_thread_exists = False;
    511    DRD_(g_threadinfo)[tid].posix_thread_exists = False;
    512    if (detached)
    513       DRD_(g_threadinfo)[tid].detached_posix_thread = False;
    514    else
    515       tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
    516    DRD_(g_threadinfo)[tid].sg_first = NULL;
    517    DRD_(g_threadinfo)[tid].sg_last = NULL;
    518 
    519    tl_assert(!DRD_(IsValidDrdThreadId)(tid));
    520 }
    521 
    522 /**
    523  * Called after a thread performed its last memory access and before
    524  * thread_delete() is called. Note: thread_delete() is only called for
    525  * joinable threads, not for detached threads.
    526  */
    527 void DRD_(thread_finished)(const DrdThreadId tid)
    528 {
    529    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    530              && tid != DRD_INVALID_THREADID);
    531 
    532    DRD_(g_threadinfo)[tid].vg_thread_exists = False;
    533 
    534    if (DRD_(g_threadinfo)[tid].detached_posix_thread)
    535    {
    536       /*
    537        * Once a detached thread has finished, its stack is deallocated and
    538        * should no longer be taken into account when computing the conflict set.
    539        */
    540       DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
    541 
    542       /*
    543        * For a detached thread, calling pthread_exit() invalidates the
    544        * POSIX thread ID associated with the detached thread. For joinable
    545        * POSIX threads however, the POSIX thread ID remains live after the
    546        * pthread_exit() call until pthread_join() is called.
    547        */
    548       DRD_(g_threadinfo)[tid].posix_thread_exists = False;
    549    }
    550 }
    551 
    552 /** Called just after fork() in the child process. */
    553 void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
    554 {
    555    unsigned i;
    556 
    557    for (i = 1; i < DRD_N_THREADS; i++)
    558    {
    559       if (i == tid)
    560 	 continue;
    561       if (DRD_(IsValidDrdThreadId(i)))
    562 	 DRD_(thread_delete)(i, True);
    563       tl_assert(!DRD_(IsValidDrdThreadId(i)));
    564    }
    565 
    566    DRD_(bm_cleanup)(DRD_(g_conflict_set));
    567    DRD_(bm_init)(DRD_(g_conflict_set));
    568 }
    569 
    570 /** Called just before pthread_cancel(). */
    571 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
    572 {
    573    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    574              && tid != DRD_INVALID_THREADID);
    575    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    576 
    577    if (DRD_(thread_get_trace_fork_join)())
    578       DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
    579                       DRD_(g_drd_running_tid), tid);
    580 }
    581 
    582 /**
    583  * Store the POSIX thread ID for the specified thread.
    584  *
    585  * @note This function may be called twice for the same thread -- see also
    586  * the comment block preceding the pthread_create() wrapper in
    587  * drd_pthread_intercepts.c.
    588  */
    589 void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
    590 {
    591    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    592              && tid != DRD_INVALID_THREADID);
    593    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
    594              || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
    595    tl_assert(ptid != INVALID_POSIX_THREADID);
    596    DRD_(g_threadinfo)[tid].posix_thread_exists = True;
    597    DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
    598 }
    599 
    600 /** Returns true for joinable threads and false for detached threads. */
    601 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
    602 {
    603    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    604              && tid != DRD_INVALID_THREADID);
    605    return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
    606 }
    607 
    608 /** Store the thread mode: joinable or detached. */
    609 #if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
    610  /* There is a CSE-related issue in GCC for MIPS. The optimization level
    611     has to be lowered so that CSE-related optimizations are not
    612     applied. */
    613  __attribute__((optimize("O1")))
    614 #endif
    615 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
    616 {
    617    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    618              && tid != DRD_INVALID_THREADID);
    619    tl_assert((!! joinable) == joinable);
    620    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    621 
    622    DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
    623 }
    624 
    625 /** Tells DRD that the calling thread is about to enter pthread_create(). */
    626 void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
    627 {
    628    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    629              && tid != DRD_INVALID_THREADID);
    630    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    631    tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
    632 
    633    DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
    634 }
    635 
    636 /** Tells DRD that the calling thread has left pthread_create(). */
    637 void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
    638 {
    639    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    640              && tid != DRD_INVALID_THREADID);
    641    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    642    tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
    643 
    644    DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
    645 }
    646 
    647 /** Obtain the thread number and the user-assigned thread name. */
    648 const HChar* DRD_(thread_get_name)(const DrdThreadId tid)
    649 {
    650    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    651              && tid != DRD_INVALID_THREADID);
    652 
    653    return DRD_(g_threadinfo)[tid].name;
    654 }
    655 
    656 /** Set the name of the specified thread. */
    657 void DRD_(thread_set_name)(const DrdThreadId tid, const HChar* const name)
    658 {
    659    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    660              && tid != DRD_INVALID_THREADID);
    661 
    662    if (name == NULL || name[0] == 0)
    663       VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
    664                     sizeof(DRD_(g_threadinfo)[tid].name),
    665                     "Thread %d",
    666                     tid);
    667    else
    668       VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
    669                     sizeof(DRD_(g_threadinfo)[tid].name),
    670                     "Thread %d (%s)",
    671                     tid, name);
    672    DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
    673 }
    674 
    675 /**
    676  * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
    677  * conflict set.
    678  */
    679 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
    680 {
    681    tl_assert(vg_tid != VG_INVALID_THREADID);
    682 
    683    if (vg_tid != s_vg_running_tid)
    684    {
    685       DRD_(thread_set_running_tid)(vg_tid,
    686                                    DRD_(VgThreadIdToDrdThreadId)(vg_tid));
    687    }
    688 
    689    tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
    690    tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
    691 }
    692 
    693 /**
    694  * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
    695  * conflict set.
    696  */
    697 void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
    698                                   const DrdThreadId drd_tid)
    699 {
    700    tl_assert(vg_tid != VG_INVALID_THREADID);
    701    tl_assert(drd_tid != DRD_INVALID_THREADID);
    702 
    703    if (vg_tid != s_vg_running_tid)
    704    {
    705       if (s_trace_context_switches
    706           && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
    707       {
    708          VG_(message)(Vg_DebugMsg,
    709                       "Context switch from thread %d to thread %d;"
    710                       " segments: %llu\n",
    711                       DRD_(g_drd_running_tid), drd_tid,
    712                       DRD_(sg_get_segments_alive_count)());
    713       }
    714       s_vg_running_tid = vg_tid;
    715       DRD_(g_drd_running_tid) = drd_tid;
    716       thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
    717       s_context_switch_count++;
    718    }
    719 
    720    tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
    721    tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
    722 }
    723 
    724 /**
    725  * Increase the synchronization nesting counter. Must be called before the
    726  * client calls a synchronization function.
    727  */
    728 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
    729 {
    730    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    731    return DRD_(g_threadinfo)[tid].synchr_nesting++;
    732 }
    733 
    734 /**
    735  * Decrease the synchronization nesting counter. Must be called after the
    736  * client left a synchronization function.
    737  */
    738 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
    739 {
    740    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    741    tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
    742    return --DRD_(g_threadinfo)[tid].synchr_nesting;
    743 }
    744 
    745 /** Returns the synchronization nesting counter. */
    746 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
    747 {
    748    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    749    return DRD_(g_threadinfo)[tid].synchr_nesting;
    750 }
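
#if 0
/*
 * Hedged usage sketch (the hypothetical helper below is not a real call
 * site; the actual callers live in DRD's client-request and intercept code,
 * not in this file): the nesting counter defined above is intended to
 * bracket a client synchronization call, so that DRD can tell whether the
 * running thread is currently inside a synchronization function.
 */
static void example_bracket_sync_call(const DrdThreadId tid)
{
   DRD_(thread_enter_synchr)(tid); /* client is about to enter e.g. a lock function */
   /* ... the intercepted synchronization function executes here ... */
   if (DRD_(thread_leave_synchr)(tid) == 0) {
      /* Nesting dropped back to zero: the client is no longer inside any
         synchronization function. */
   }
}
#endif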
    751 
    752 /** Append a new segment at the end of the segment list. */
    753 static
    754 void thread_append_segment(const DrdThreadId tid, Segment* const sg)
    755 {
    756    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    757              && tid != DRD_INVALID_THREADID);
    758 
    759 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    760    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    761 #endif
    762 
    763    // add at tail
    764    sg->thr_prev = DRD_(g_threadinfo)[tid].sg_last;
    765    sg->thr_next = NULL;
    766    if (DRD_(g_threadinfo)[tid].sg_last)
    767       DRD_(g_threadinfo)[tid].sg_last->thr_next = sg;
    768    DRD_(g_threadinfo)[tid].sg_last = sg;
    769    if (DRD_(g_threadinfo)[tid].sg_first == NULL)
    770       DRD_(g_threadinfo)[tid].sg_first = sg;
    771 
    772 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    773    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    774 #endif
    775 }
    776 
    777 /**
    778  * Remove a segment from the segment list of thread threadid, and free the
    779  * associated memory.
    780  */
    781 static
    782 void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
    783 {
    784    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    785              && tid != DRD_INVALID_THREADID);
    786 
    787 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    788    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    789 #endif
    790 
    791    if (sg->thr_prev)
    792       sg->thr_prev->thr_next = sg->thr_next;
    793    if (sg->thr_next)
    794       sg->thr_next->thr_prev = sg->thr_prev;
    795    if (sg == DRD_(g_threadinfo)[tid].sg_first)
    796       DRD_(g_threadinfo)[tid].sg_first = sg->thr_next;
    797    if (sg == DRD_(g_threadinfo)[tid].sg_last)
    798       DRD_(g_threadinfo)[tid].sg_last = sg->thr_prev;
    799    DRD_(sg_put)(sg);
    800 
    801 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    802    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    803 #endif
    804 }
    805 
    806 /**
    807  * Returns a pointer to the vector clock of the most recent segment associated
    808  * with thread 'tid'.
    809  */
    810 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
    811 {
    812    Segment* latest_sg;
    813 
    814    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    815              && tid != DRD_INVALID_THREADID);
    816    latest_sg = DRD_(g_threadinfo)[tid].sg_last;
    817    tl_assert(latest_sg);
    818    return &latest_sg->vc;
    819 }
    820 
    821 /**
    822  * Return the latest segment of thread 'tid' and increment its reference count.
    823  */
    824 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
    825 {
    826    Segment* latest_sg;
    827 
    828    tl_assert(sg);
    829    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    830              && tid != DRD_INVALID_THREADID);
    831    latest_sg = DRD_(g_threadinfo)[tid].sg_last;
    832    tl_assert(latest_sg);
    833 
    834    DRD_(sg_put)(*sg);
    835    *sg = DRD_(sg_get)(latest_sg);
    836 }
    837 
    838 /**
    839  * Compute the minimum of all latest vector clocks of all threads
    840  * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
    841  *
    842  * @param vc pointer to a vectorclock, holds result upon return.
    843  */
    844 static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
    845 {
    846    unsigned i;
    847    Bool first;
    848    Segment* latest_sg;
    849 
    850    first = True;
    851    for (i = 0; i < DRD_N_THREADS; i++)
    852    {
    853       latest_sg = DRD_(g_threadinfo)[i].sg_last;
    854       if (latest_sg) {
    855          if (first)
    856             DRD_(vc_assign)(vc, &latest_sg->vc);
    857          else
    858             DRD_(vc_min)(vc, &latest_sg->vc);
    859          first = False;
    860       }
    861    }
    862 }
    863 
    864 /**
    865  * Compute the maximum of all latest vector clocks of all threads.
    866  *
    867  * @param vc pointer to a vectorclock, holds result upon return.
    868  */
    869 static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
    870 {
    871    unsigned i;
    872    Bool first;
    873    Segment* latest_sg;
    874 
    875    first = True;
    876    for (i = 0; i < DRD_N_THREADS; i++)
    877    {
    878       latest_sg = DRD_(g_threadinfo)[i].sg_last;
    879       if (latest_sg) {
    880          if (first)
    881             DRD_(vc_assign)(vc, &latest_sg->vc);
    882          else
    883             DRD_(vc_combine)(vc, &latest_sg->vc);
    884          first = False;
    885       }
    886    }
    887 }
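
/*
 * Worked example with made-up numbers: assume the latest segments of three
 * threads carry the vector clocks (4, 7, 1), (3, 9, 2) and (6, 7, 0). The
 * component-wise minimum computed by DRD_(thread_compute_minimum_vc)() is
 * then (3, 7, 0), and the component-wise maximum computed by
 * DRD_(thread_compute_maximum_vc)() is (6, 9, 2).
 */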
    888 
    889 /**
    890  * Discard all segments that have a defined order against the latest vector
    891  * clock of all threads -- these segments can no longer be involved in a
    892  * data race.
    893  */
    894 static void thread_discard_ordered_segments(void)
    895 {
    896    unsigned i;
    897    VectorClock thread_vc_min;
    898 
    899    s_discard_ordered_segments_count++;
    900 
    901    DRD_(vc_init)(&thread_vc_min, 0, 0);
    902    DRD_(thread_compute_minimum_vc)(&thread_vc_min);
    903    if (DRD_(sg_get_trace)())
    904    {
    905       HChar *vc_min, *vc_max;
    906       VectorClock thread_vc_max;
    907 
    908       DRD_(vc_init)(&thread_vc_max, 0, 0);
    909       DRD_(thread_compute_maximum_vc)(&thread_vc_max);
    910       vc_min = DRD_(vc_aprint)(&thread_vc_min);
    911       vc_max = DRD_(vc_aprint)(&thread_vc_max);
    912       VG_(message)(Vg_DebugMsg,
    913                    "Discarding ordered segments -- min vc is %s, max vc is %s\n",
    914                    vc_min, vc_max);
    915       VG_(free)(vc_min);
    916       VG_(free)(vc_max);
    917       DRD_(vc_cleanup)(&thread_vc_max);
    918    }
    919 
    920    for (i = 0; i < DRD_N_THREADS; i++) {
    921       Segment* sg;
    922       Segment* sg_next;
    923 
    924       for (sg = DRD_(g_threadinfo)[i].sg_first;
    925            sg && (sg_next = sg->thr_next)
    926               && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
    927            sg = sg_next)
    928       {
    929          thread_discard_segment(i, sg);
    930       }
    931    }
    932    DRD_(vc_cleanup)(&thread_vc_min);
    933 }
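
/*
 * Illustrative example: if the minimum over all latest vector clocks is
 * (3, 7, 0), then any segment whose own vector clock satisfies
 * DRD_(vc_lte)(&sg->vc, &thread_vc_min) -- e.g. a segment with vector clock
 * (2, 5, 0) -- is ordered before the latest segment of every thread. Such a
 * segment can no longer be involved in a data race with any future access
 * and is therefore discarded. Note that the loop above never discards the
 * last segment of a thread: sg->thr_next must be non-NULL for a segment to
 * be discarded.
 */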
    934 
    935 /**
    936  * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
    937  * by Mark Christiaens et al. The property equiv(sg1, sg2) holds if and only if
    938  * all segments in the set CS are ordered consistently against both sg1 and
    939  * sg2. The set CS is defined as the set of segments that can immediately
    940  * precede future segments via inter-thread synchronization operations. In
    941  * DRD the set CS consists of the latest segment of each thread combined with
    942  * all segments for which the reference count is strictly greater than one.
    943  * The code below is an optimized version of the following:
    944  *
    945  * for (i = 0; i < DRD_N_THREADS; i++)
    946  * {
    947  *    Segment* sg;
    948  *
    949  *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
    950  *    {
    951  *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
    952  *       {
    953  *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
    954  *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
    955  *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
    956  *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
    957  *          {
    958  *             return False;
    959  *          }
    960  *       }
    961  *    }
    962  * }
    963  */
    964 static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
    965                                                Segment* const sg1,
    966                                                Segment* const sg2)
    967 {
    968    unsigned i;
    969 
    970    tl_assert(sg1->thr_next);
    971    tl_assert(sg2->thr_next);
    972    tl_assert(sg1->thr_next == sg2);
    973    tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
    974 
    975    for (i = 0; i < DRD_N_THREADS; i++)
    976    {
    977       Segment* sg;
    978 
    979       for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
    980          if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
    981             if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
    982                break;
    983             if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
    984                return False;
    985          }
    986       }
    987       for (sg = DRD_(g_threadinfo)[i].sg_last; sg; sg = sg->thr_prev) {
    988          if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
    989             if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
    990                break;
    991             if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
    992                return False;
    993          }
    994       }
    995    }
    996    return True;
    997 }
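
/*
 * A concrete (made-up) example of the property checked above. Let thread A
 * have two consecutive segments sg1 and sg2 with vector clocks (2, 0) and
 * (3, 0), and let the only segment of another thread in the set CS be
 * thread B's latest segment s with vector clock (1, 5).
 *
 *  - s is unordered against both sg1 and sg2: neither vc_lte(&s->vc,
 *    &sg1->vc) nor vc_lte(&sg1->vc, &s->vc) holds, and the same for sg2.
 *    So s is ordered consistently against the pair, equiv(sg1, sg2) holds,
 *    and merging sg1 and sg2 cannot hide or introduce a race with s.
 *
 *  - If instead sg2 had been created after synchronizing with B, e.g. with
 *    vector clock (3, 5), then vc_lte(&s->vc, &sg2->vc) would hold while
 *    vc_lte(&s->vc, &sg1->vc) would not. equiv(sg1, sg2) would be False and
 *    the two segments must not be merged, because merging them would lose
 *    the ordering established by that synchronization.
 */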
    998 
    999 /**
   1000  * Merge all segments that may be merged without triggering false positives
   1001  * or discarding real data races. For the theoretical background of segment
   1002  * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
   1003  * and Koen De Bosschere. Bounding the number of segment histories during
   1004  * data race detection. Parallel Computing archive, Volume 28, Issue 9,
   1005  * pp 1221-1238, September 2002. This paper contains a proof that merging
   1006  * consecutive segments for which the property equiv(s1,s2) holds can be
   1007  * merged without reducing the accuracy of datarace detection. Furthermore
   1008  * it is also proven that the total number of all segments will never grow
   1009  * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
   1010  * every time a new segment is created. The property equiv(s1, s2) is defined
   1011  * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
   1012  * clocks of segments s and s1 are ordered in the same way as those of segments
   1013  * s and s2. The set CS is defined as the set of existing segments s that have
   1014  * the potential to conflict with not yet created segments, either because the
   1015  * segment s is the latest segment of a thread or because it can become the
   1016  * immediate predecessor of a new segment due to a synchronization operation.
   1017  */
   1018 static void thread_merge_segments(void)
   1019 {
   1020    unsigned i;
   1021 
   1022    s_new_segments_since_last_merge = 0;
   1023 
   1024    for (i = 0; i < DRD_N_THREADS; i++)
   1025    {
   1026       Segment* sg;
   1027 
   1028 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   1029       tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
   1030 #endif
   1031 
   1032       for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
   1033          if (DRD_(sg_get_refcnt)(sg) == 1 && sg->thr_next) {
   1034             Segment* const sg_next = sg->thr_next;
   1035             if (DRD_(sg_get_refcnt)(sg_next) == 1
   1036                 && sg_next->thr_next
   1037                 && thread_consistent_segment_ordering(i, sg, sg_next))
   1038             {
   1039                /* Merge sg and sg_next into sg. */
   1040                DRD_(sg_merge)(sg, sg_next);
   1041                thread_discard_segment(i, sg_next);
   1042             }
   1043          }
   1044       }
   1045 
   1046 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   1047       tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
   1048 #endif
   1049    }
   1050 }
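
/*
 * In terms of the example given after thread_consistent_segment_ordering():
 * the loop above would merge sg1 and sg2 in the first scenario (both have a
 * reference count of one, sg2 is not the last segment of the thread, and
 * the ordering against every other relevant segment is consistent), and
 * would leave them alone in the second scenario. How often this merging is
 * attempted is controlled by s_segment_merge_interval -- see
 * DRD_(thread_new_segment)() below.
 */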
   1051 
   1052 /**
   1053  * Create a new segment for the specified thread, and discard any segments
   1054  * that cannot cause races anymore.
   1055  */
   1056 void DRD_(thread_new_segment)(const DrdThreadId tid)
   1057 {
   1058    Segment* last_sg;
   1059    Segment* new_sg;
   1060 
   1061    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1062              && tid != DRD_INVALID_THREADID);
   1063    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1064 
   1065    last_sg = DRD_(g_threadinfo)[tid].sg_last;
   1066    new_sg = DRD_(sg_new)(tid, tid);
   1067    thread_append_segment(tid, new_sg);
   1068    if (tid == DRD_(g_drd_running_tid) && last_sg)
   1069    {
   1070       DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
   1071       s_update_conflict_set_new_sg_count++;
   1072    }
   1073 
   1074    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1075 
   1076    if (s_segment_merging
   1077        && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   1078    {
   1079       thread_discard_ordered_segments();
   1080       thread_merge_segments();
   1081    }
   1082 }
   1083 
   1084 /** Call this function after thread 'joiner' joined thread 'joinee'. */
   1085 void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
   1086 {
   1087    tl_assert(joiner != joinee);
   1088    tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
   1089              && joiner != DRD_INVALID_THREADID);
   1090    tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
   1091              && joinee != DRD_INVALID_THREADID);
   1092    tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
   1093    tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
   1094    tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
   1095    tl_assert(DRD_(g_threadinfo)[joinee].sg_last);
   1096 
   1097    if (DRD_(sg_get_trace)())
   1098    {
   1099       HChar *str1, *str2;
   1100       str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
   1101       str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
   1102       VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
   1103                    str1, str2);
   1104       VG_(free)(str1);
   1105       VG_(free)(str2);
   1106    }
   1107    if (joiner == DRD_(g_drd_running_tid)) {
   1108       VectorClock old_vc;
   1109 
   1110       DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
   1111       DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
   1112                        DRD_(thread_get_vc)(joinee));
   1113       DRD_(thread_update_conflict_set)(joiner, &old_vc);
   1114       s_update_conflict_set_join_count++;
   1115       DRD_(vc_cleanup)(&old_vc);
   1116    } else {
   1117       DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
   1118                        DRD_(thread_get_vc)(joinee));
   1119    }
   1120 
   1121    thread_discard_ordered_segments();
   1122 
   1123    if (DRD_(sg_get_trace)()) {
   1124       HChar* str;
   1125 
   1126       str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
   1127       VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
   1128       VG_(free)(str);
   1129    }
   1130 }
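
/*
 * Worked example with made-up clock values: if just before the join the
 * joiner's vector clock is (5, 2, 0) and the joinee's is (1, 8, 3), then
 * DRD_(vc_combine)() above sets the joiner's clock to the component-wise
 * maximum (5, 8, 3). Every memory access the joinee performed before it
 * terminated is thereby ordered before all subsequent accesses of the
 * joiner, so such pairs of accesses are no longer reported as races.
 */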
   1131 
   1132 /**
   1133  * Update the vector clock of the last segment of thread tid with the
   1134  * vector clock of segment sg.
   1135  */
   1136 static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
   1137 {
   1138    const VectorClock* const vc = &sg->vc;
   1139 
   1140    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1141              && tid != DRD_INVALID_THREADID);
   1142    tl_assert(DRD_(g_threadinfo)[tid].sg_first);
   1143    tl_assert(DRD_(g_threadinfo)[tid].sg_last);
   1144    tl_assert(sg);
   1145    tl_assert(vc);
   1146 
   1147    if (tid != sg->tid) {
   1148       VectorClock old_vc;
   1149 
   1150       DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
   1151       DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
   1152       if (DRD_(sg_get_trace)()) {
   1153          HChar *str1, *str2;
   1154          str1 = DRD_(vc_aprint)(&old_vc);
   1155          str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
   1156          VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
   1157          VG_(free)(str1);
   1158          VG_(free)(str2);
   1159       }
   1160 
   1161       thread_discard_ordered_segments();
   1162 
   1163       DRD_(thread_update_conflict_set)(tid, &old_vc);
   1164       s_update_conflict_set_sync_count++;
   1165 
   1166       DRD_(vc_cleanup)(&old_vc);
   1167    } else {
   1168       tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
   1169    }
   1170 }
   1171 
   1172 /**
   1173  * Create a new segment for thread tid and update the vector clock of the last
   1174  * segment of this thread with the vector clock of segment sg. Call this
   1175  * function after thread tid had to wait, because of thread synchronization,
   1176  * until the memory accesses in segment sg had finished.
   1177  */
   1178 void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
   1179 {
   1180    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1181              && tid != DRD_INVALID_THREADID);
   1182    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1183    tl_assert(sg);
   1184 
   1185    thread_append_segment(tid, DRD_(sg_new)(tid, tid));
   1186 
   1187    thread_combine_vc_sync(tid, sg);
   1188 
   1189    if (s_segment_merging
   1190        && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   1191    {
   1192       thread_discard_ordered_segments();
   1193       thread_merge_segments();
   1194    }
   1195 }
   1196 
   1197 /**
   1198  * Call this function whenever a thread is no longer using the memory
   1199  * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
   1200  * increase.
   1201  */
   1202 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
   1203 {
   1204    Segment* p;
   1205 
   1206    for (p = DRD_(g_sg_list); p; p = p->g_next)
   1207       DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
   1208 
   1209    DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
   1210 }
   1211 
   1212 /** Specify whether memory loads should be recorded. */
   1213 void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
   1214 {
   1215    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1216              && tid != DRD_INVALID_THREADID);
   1217    tl_assert(enabled == !! enabled);
   1218 
   1219    DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
   1220 }
   1221 
   1222 /** Specify whether memory stores should be recorded. */
   1223 void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
   1224 {
   1225    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1226              && tid != DRD_INVALID_THREADID);
   1227    tl_assert(enabled == !! enabled);
   1228 
   1229    DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
   1230 }
   1231 
   1232 /**
   1233  * Print the segment information for all threads.
   1234  *
   1235  * This function is only used for debugging purposes.
   1236  */
   1237 void DRD_(thread_print_all)(void)
   1238 {
   1239    unsigned i;
   1240    Segment* p;
   1241 
   1242    for (i = 0; i < DRD_N_THREADS; i++)
   1243    {
   1244       p = DRD_(g_threadinfo)[i].sg_first;
   1245       if (p) {
   1246          VG_(printf)("**************\n"
   1247                      "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
   1248                      "**************\n",
   1249                      i,
   1250                      DRD_(g_threadinfo)[i].valid,
   1251                      DRD_(g_threadinfo)[i].vg_thread_exists,
   1252                      DRD_(g_threadinfo)[i].vg_threadid,
   1253                      DRD_(g_threadinfo)[i].posix_thread_exists,
   1254                      DRD_(g_threadinfo)[i].pt_threadid,
   1255                      DRD_(g_threadinfo)[i].detached_posix_thread);
   1256          for ( ; p; p = p->thr_next)
   1257             DRD_(sg_print)(p);
   1258       }
   1259    }
   1260 }
   1261 
   1262 /** Show a call stack involved in a data race. */
   1263 static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
   1264 {
   1265    const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
   1266 
   1267    if (vg_tid != VG_INVALID_THREADID) {
   1268       if (callstack)
   1269          VG_(pp_ExeContext)(callstack);
   1270       else
   1271          VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   1272    } else {
   1273       if (!VG_(clo_xml))
   1274          VG_(message)(Vg_UserMsg,
   1275                       "   (thread finished, call stack no longer available)\n");
   1276    }
   1277 }
   1278 
   1279 /** Print information about the segments involved in a data race. */
   1280 static void
   1281 thread_report_conflicting_segments_segment(const DrdThreadId tid,
   1282                                            const Addr addr,
   1283                                            const SizeT size,
   1284                                            const BmAccessTypeT access_type,
   1285                                            const Segment* const p)
   1286 {
   1287    unsigned i;
   1288 
   1289    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1290              && tid != DRD_INVALID_THREADID);
   1291    tl_assert(p);
   1292 
   1293    for (i = 0; i < DRD_N_THREADS; i++) {
   1294       if (i != tid) {
   1295          Segment* q;
   1296 
   1297          for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
   1298             /*
   1299              * Since q iterates over the segments of thread i in order of
   1300              * decreasing vector clocks, q->vc <= p->vc implies that the same
   1301              * holds for all segments visited later in this loop (q->thr_prev,
   1302              * etc.). Hence, break out of the loop once this condition is met.
   1303              */
   1304             if (DRD_(vc_lte)(&q->vc, &p->vc))
   1305                break;
   1306             if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
   1307                if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
   1308                                               access_type)) {
   1309                   Segment* q_next;
   1310 
   1311                   tl_assert(q->stacktrace);
   1312                   if (VG_(clo_xml))
   1313                      VG_(printf_xml)("  <other_segment_start>\n");
   1314                   else
   1315                      VG_(message)(Vg_UserMsg,
   1316                                   "Other segment start (thread %d)\n", i);
   1317                   show_call_stack(i, q->stacktrace);
   1318                   if (VG_(clo_xml))
   1319                      VG_(printf_xml)("  </other_segment_start>\n"
   1320                                      "  <other_segment_end>\n");
   1321                   else
   1322                      VG_(message)(Vg_UserMsg,
   1323                                   "Other segment end (thread %d)\n", i);
   1324                   q_next = q->thr_next;
   1325                   show_call_stack(i, q_next ? q_next->stacktrace : 0);
   1326                   if (VG_(clo_xml))
   1327                      VG_(printf_xml)("  </other_segment_end>\n");
   1328                }
   1329             }
   1330          }
   1331       }
   1332    }
   1333 }
   1334 
   1335 /** Print information about all segments involved in a data race. */
   1336 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
   1337                                               const Addr addr,
   1338                                               const SizeT size,
   1339                                               const BmAccessTypeT access_type)
   1340 {
   1341    Segment* p;
   1342 
   1343    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1344              && tid != DRD_INVALID_THREADID);
   1345 
   1346    for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
   1347       if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
   1348          thread_report_conflicting_segments_segment(tid, addr, size,
   1349                                                     access_type, p);
   1350    }
   1351 }
   1352 
   1353 /**
   1354  * Verify whether the conflict set for thread tid is up to date. Only perform
   1355  * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
   1356  */
   1357 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
   1358 {
   1359    Bool result;
   1360    struct bitmap* computed_conflict_set = 0;
   1361 
   1362    if (!DRD_(verify_conflict_set))
   1363       return True;
   1364 
   1365    thread_compute_conflict_set(&computed_conflict_set, tid);
   1366    result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   1367    if (! result)
   1368    {
   1369       VG_(printf)("actual conflict set:\n");
   1370       DRD_(bm_print)(DRD_(g_conflict_set));
   1371       VG_(printf)("\n");
   1372       VG_(printf)("computed conflict set:\n");
   1373       DRD_(bm_print)(computed_conflict_set);
   1374       VG_(printf)("\n");
   1375    }
   1376    DRD_(bm_delete)(computed_conflict_set);
   1377    return result;
   1378 }
   1379 
   1380 /**
   1381  * Compute the conflict set: a bitmap that represents the union of all memory
   1382  * accesses of all segments that are unordered to the current segment of the
   1383  * thread tid.
   1384  */
   1385 static void thread_compute_conflict_set(struct bitmap** conflict_set,
   1386                                         const DrdThreadId tid)
   1387 {
   1388    Segment* p;
   1389 
   1390    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1391              && tid != DRD_INVALID_THREADID);
   1392    tl_assert(tid == DRD_(g_drd_running_tid));
   1393 
   1394    s_compute_conflict_set_count++;
   1395    s_conflict_set_bitmap_creation_count
   1396       -= DRD_(bm_get_bitmap_creation_count)();
   1397    s_conflict_set_bitmap2_creation_count
   1398       -= DRD_(bm_get_bitmap2_creation_count)();
   1399 
   1400    if (*conflict_set) {
   1401       DRD_(bm_cleanup)(*conflict_set);
   1402       DRD_(bm_init)(*conflict_set);
   1403    } else {
   1404       *conflict_set = DRD_(bm_new)();
   1405    }
   1406 
   1407    if (s_trace_conflict_set) {
   1408       HChar* str;
   1409 
   1410       str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
   1411       VG_(message)(Vg_DebugMsg,
   1412                    "computing conflict set for thread %d with vc %s\n",
   1413                    tid, str);
   1414       VG_(free)(str);
   1415    }
   1416 
   1417    p = DRD_(g_threadinfo)[tid].sg_last;
   1418    {
   1419       unsigned j;
   1420 
   1421       if (s_trace_conflict_set) {
   1422          HChar* vc;
   1423 
   1424          vc = DRD_(vc_aprint)(&p->vc);
   1425          VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
   1426                       tid, vc);
   1427          VG_(free)(vc);
   1428       }
   1429 
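              /* Merge the access bitmaps of all segments of other threads that are
               * neither ordered before nor after segment p. */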
   1430       for (j = 0; j < DRD_N_THREADS; j++) {
   1431          if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
   1432             Segment* q;
   1433 
   1434             for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
   1435                if (!DRD_(vc_lte)(&q->vc, &p->vc)
   1436                    && !DRD_(vc_lte)(&p->vc, &q->vc)) {
   1437                   if (s_trace_conflict_set) {
   1438                      HChar* str;
   1439 
   1440                      str = DRD_(vc_aprint)(&q->vc);
   1441                      VG_(message)(Vg_DebugMsg,
   1442                                   "conflict set: [%d] merging segment %s\n",
   1443                                   j, str);
   1444                      VG_(free)(str);
   1445                   }
   1446                   DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
   1447                } else {
   1448                   if (s_trace_conflict_set) {
   1449                      HChar* str;
   1450 
   1451                      str = DRD_(vc_aprint)(&q->vc);
   1452                      VG_(message)(Vg_DebugMsg,
   1453                                   "conflict set: [%d] ignoring segment %s\n",
   1454                                   j, str);
   1455                      VG_(free)(str);
   1456                   }
   1457                }
   1458             }
   1459          }
   1460       }
   1461    }
   1462 
   1463    s_conflict_set_bitmap_creation_count
   1464       += DRD_(bm_get_bitmap_creation_count)();
   1465    s_conflict_set_bitmap2_creation_count
   1466       += DRD_(bm_get_bitmap2_creation_count)();
   1467 
   1468    if (s_trace_conflict_set_bm) {
   1469       VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
   1470       DRD_(bm_print)(*conflict_set);
   1471       VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   1472    }
   1473 }
   1474 
   1475 /**
   1476  * Update the conflict set after the vector clock of thread tid has been
   1477  * updated from old_vc to its current value, either because a new segment has
   1478  * been created or because of a synchronization operation.
   1479  */
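        /*
         * The update proceeds in two phases: first the address ranges of all
         * segments whose membership in the conflict set changes are marked, and
         * next the conflict-set information for the marked ranges is cleared and
         * rebuilt by re-merging the segments that still conflict with the current
         * segment.
         */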
   1480 void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
   1481                                       const VectorClock* const old_vc)
   1482 {
   1483    const VectorClock* new_vc;
   1484    Segment* p;
   1485    unsigned j;
   1486 
   1487    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1488              && tid != DRD_INVALID_THREADID);
   1489    tl_assert(old_vc);
   1490    tl_assert(tid == DRD_(g_drd_running_tid));
   1491    tl_assert(DRD_(g_conflict_set));
   1492 
   1493    if (s_trace_conflict_set) {
   1494       HChar* str;
   1495 
   1496       str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
   1497       VG_(message)(Vg_DebugMsg,
   1498                    "updating conflict set for thread %d with vc %s\n",
   1499                    tid, str);
   1500       VG_(free)(str);
   1501    }
   1502 
   1503    new_vc = DRD_(thread_get_vc)(tid);
   1504    tl_assert(DRD_(vc_lte)(old_vc, new_vc));
   1505 
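           /* Phase one: clear all marks and then mark the address range of every
            * segment whose membership in the conflict set changes because of the
            * vector clock update. */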
   1506    DRD_(bm_unmark)(DRD_(g_conflict_set));
   1507 
   1508    for (j = 0; j < DRD_N_THREADS; j++)
   1509    {
   1510       Segment* q;
   1511 
   1512       if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
   1513          continue;
   1514 
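              /* Segments that are not ordered before new_vc: such a segment is part
               * of the new conflict set iff new_vc is not ordered before it either.
               * Mark it if that differs from its membership in the old conflict
               * set. */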
   1515       for (q = DRD_(g_threadinfo)[j].sg_last;
   1516            q && !DRD_(vc_lte)(&q->vc, new_vc);
   1517            q = q->thr_prev) {
   1518          const Bool included_in_old_conflict_set
   1519             = !DRD_(vc_lte)(old_vc, &q->vc);
   1520          const Bool included_in_new_conflict_set
   1521             = !DRD_(vc_lte)(new_vc, &q->vc);
   1522 
   1523          if (UNLIKELY(s_trace_conflict_set)) {
   1524             HChar* str;
   1525 
   1526             str = DRD_(vc_aprint)(&q->vc);
   1527             VG_(message)(Vg_DebugMsg,
   1528                          "conflict set: [%d] %s segment %s\n", j,
   1529                          included_in_old_conflict_set
   1530                          != included_in_new_conflict_set
   1531                          ? "merging" : "ignoring", str);
   1532             VG_(free)(str);
   1533          }
   1534          if (included_in_old_conflict_set != included_in_new_conflict_set)
   1535             DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
   1536       }
   1537 
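              /* Continue with the segments that are ordered before new_vc but not
               * before old_vc. Such a segment can only have dropped out of the
               * conflict set, so mark it if it was part of the old conflict set.
               * Segments ordered before old_vc were in neither conflict set. */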
   1538       for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
   1539          const Bool included_in_old_conflict_set
   1540             = !DRD_(vc_lte)(old_vc, &q->vc);
   1541          const Bool included_in_new_conflict_set
   1542             = !DRD_(vc_lte)(&q->vc, new_vc)
   1543             && !DRD_(vc_lte)(new_vc, &q->vc);
   1544 
   1545          if (UNLIKELY(s_trace_conflict_set)) {
   1546             HChar* str;
   1547 
   1548             str = DRD_(vc_aprint)(&q->vc);
   1549             VG_(message)(Vg_DebugMsg,
   1550                          "conflict set: [%d] %s segment %s\n", j,
   1551                          included_in_old_conflict_set
   1552                          != included_in_new_conflict_set
   1553                          ? "merging" : "ignoring", str);
   1554             VG_(free)(str);
   1555          }
   1556          if (included_in_old_conflict_set != included_in_new_conflict_set)
   1557             DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
   1558       }
   1559    }
   1560 
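           /* Phase two: wipe the conflict-set information for all marked address
            * ranges and rebuild it by merging every segment that conflicts with
            * the current segment p of thread tid. */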
   1561    DRD_(bm_clear_marked)(DRD_(g_conflict_set));
   1562 
   1563    p = DRD_(g_threadinfo)[tid].sg_last;
   1564    for (j = 0; j < DRD_N_THREADS; j++) {
   1565       if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
   1566          Segment* q;
   1567          for (q = DRD_(g_threadinfo)[j].sg_last;
   1568               q && !DRD_(vc_lte)(&q->vc, &p->vc);
   1569               q = q->thr_prev) {
   1570             if (!DRD_(vc_lte)(&p->vc, &q->vc))
   1571                DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
   1572          }
   1573       }
   1574    }
   1575 
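           /* Discard the conflict set entries that were cleared above and have not
            * been re-populated. */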
   1576    DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));
   1577 
   1578    s_update_conflict_set_count++;
   1579 
   1580    if (s_trace_conflict_set_bm)
   1581    {
   1582       VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
   1583       DRD_(bm_print)(DRD_(g_conflict_set));
   1584       VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   1585    }
   1586 
   1587    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1588 }
   1589 
   1590 /** Report the number of context switches performed. */
   1591 ULong DRD_(thread_get_context_switch_count)(void)
   1592 {
   1593    return s_context_switch_count;
   1594 }
   1595 
   1596 /** Report the number of ordered segments that have been discarded. */
   1597 ULong DRD_(thread_get_discard_ordered_segments_count)(void)
   1598 {
   1599    return s_discard_ordered_segments_count;
   1600 }
   1601 
   1602 /** Return how many times the conflict set has been recomputed from scratch. */
   1603 ULong DRD_(thread_get_compute_conflict_set_count)(void)
   1604 {
   1605    return s_compute_conflict_set_count;
   1606 }
   1607 
   1608 /** Return how many times the conflict set has been updated partially. */
   1609 ULong DRD_(thread_get_update_conflict_set_count)(void)
   1610 {
   1611    return s_update_conflict_set_count;
   1612 }
   1613 
   1614 /**
   1615  * Return how many times the conflict set has been updated partially
   1616  * because a new segment has been created.
   1617  */
   1618 ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
   1619 {
   1620    return s_update_conflict_set_new_sg_count;
   1621 }
   1622 
   1623 /**
   1624  * Return how many times the conflict set has been updated partially
   1625  * because of combining vector clocks due to synchronization operations
   1626  * other than reader/writer lock or barrier operations.
   1627  */
   1628 ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
   1629 {
   1630    return s_update_conflict_set_sync_count;
   1631 }
   1632 
   1633 /**
   1634  * Return how many times the conflict set has been updated partially
   1635  * because of thread joins.
   1636  */
   1637 ULong DRD_(thread_get_update_conflict_set_join_count)(void)
   1638 {
   1639    return s_update_conflict_set_join_count;
   1640 }
   1641 
   1642 /**
   1643  * Return the number of first-level bitmaps that have been created during
   1644  * conflict set updates.
   1645  */
   1646 ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
   1647 {
   1648    return s_conflict_set_bitmap_creation_count;
   1649 }
   1650 
   1651 /**
   1652  * Return the number of second-level bitmaps that have been created during
   1653  * conflict set updates.
   1654  */
   1655 ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
   1656 {
   1657    return s_conflict_set_bitmap2_creation_count;
   1658 }
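
        /*
         * The counters returned by the accessors above are intended for the tool's
         * statistics output. The function below is only an illustrative sketch of
         * such a report and is not compiled in: its name is hypothetical, while
         * VG_(message)() and the accessors it calls are the interfaces defined in
         * this file.
         */
        #if 0
        static void thread_print_conflict_set_stats_sketch(void)
        {
           VG_(message)(Vg_DebugMsg, "   context switches:             %llu\n",
                        DRD_(thread_get_context_switch_count)());
           VG_(message)(Vg_DebugMsg, "   conflict set recomputations:  %llu\n",
                        DRD_(thread_get_compute_conflict_set_count)());
           VG_(message)(Vg_DebugMsg, "   partial conflict set updates: %llu\n",
                        DRD_(thread_get_update_conflict_set_count)());
        }
        #endif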
   1659