      1 /* -*- mode: C; c-basic-offset: 3; -*- */
      2 /*
      3   This file is part of drd, a thread error detector.
      4 
      5   Copyright (C) 2006-2010 Bart Van Assche <bvanassche (at) acm.org>.
      6 
      7   This program is free software; you can redistribute it and/or
      8   modify it under the terms of the GNU General Public License as
      9   published by the Free Software Foundation; either version 2 of the
     10   License, or (at your option) any later version.
     11 
     12   This program is distributed in the hope that it will be useful, but
     13   WITHOUT ANY WARRANTY; without even the implied warranty of
     14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     15   General Public License for more details.
     16 
     17   You should have received a copy of the GNU General Public License
     18   along with this program; if not, write to the Free Software
     19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     20   02111-1307, USA.
     21 
     22   The GNU General Public License is contained in the file COPYING.
     23 */
     24 
     25 
     26 #include "drd_error.h"
     27 #include "drd_barrier.h"
     28 #include "drd_clientobj.h"
     29 #include "drd_cond.h"
     30 #include "drd_mutex.h"
     31 #include "drd_segment.h"
     32 #include "drd_semaphore.h"
     33 #include "drd_suppression.h"
     34 #include "drd_thread.h"
     35 #include "pub_tool_vki.h"
     36 #include "pub_tool_basics.h"      // Addr, SizeT
     37 #include "pub_tool_libcassert.h"  // tl_assert()
     38 #include "pub_tool_libcbase.h"    // VG_(strlen)()
     39 #include "pub_tool_libcprint.h"   // VG_(printf)()
     40 #include "pub_tool_libcproc.h"    // VG_(getenv)()
     41 #include "pub_tool_machine.h"
     42 #include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
     43 #include "pub_tool_options.h"     // VG_(clo_backtrace_size)
     44 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
     45 
     46 
     47 
     48 /* Local functions. */
     49 
     50 static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
     51 static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
     52 static void thread_compute_conflict_set(struct bitmap** conflict_set,
     53                                         const DrdThreadId tid);
     54 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
     55 
     56 
     57 /* Local variables. */
     58 
     59 static ULong    s_context_switch_count;
     60 static ULong    s_discard_ordered_segments_count;
     61 static ULong    s_compute_conflict_set_count;
     62 static ULong    s_update_conflict_set_count;
     63 static ULong    s_update_conflict_set_new_sg_count;
     64 static ULong    s_update_conflict_set_sync_count;
     65 static ULong    s_update_conflict_set_join_count;
     66 static ULong    s_conflict_set_bitmap_creation_count;
     67 static ULong    s_conflict_set_bitmap2_creation_count;
     68 static ThreadId s_vg_running_tid  = VG_INVALID_THREADID;
     69 DrdThreadId     DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
     70 ThreadInfo      DRD_(g_threadinfo)[DRD_N_THREADS];
     71 struct bitmap*  DRD_(g_conflict_set);
     72 static Bool     s_trace_context_switches = False;
     73 static Bool     s_trace_conflict_set = False;
     74 static Bool     s_trace_conflict_set_bm = False;
     75 static Bool     s_trace_fork_join = False;
     76 static Bool     s_segment_merging = True;
     77 static Bool     s_new_segments_since_last_merge;
     78 static int      s_segment_merge_interval = 10;
     79 
     80 
     81 /* Function definitions. */
     82 
     83 /** Enables/disables context switch tracing. */
     84 void DRD_(thread_trace_context_switches)(const Bool t)
     85 {
     86    tl_assert(t == False || t == True);
     87    s_trace_context_switches = t;
     88 }
     89 
     90 /** Enables/disables conflict set tracing. */
     91 void DRD_(thread_trace_conflict_set)(const Bool t)
     92 {
     93    tl_assert(t == False || t == True);
     94    s_trace_conflict_set = t;
     95 }
     96 
     97 /** Enables/disables conflict set bitmap tracing. */
     98 void DRD_(thread_trace_conflict_set_bm)(const Bool t)
     99 {
    100    tl_assert(t == False || t == True);
    101    s_trace_conflict_set_bm = t;
    102 }
    103 
    104 /** Report whether fork/join tracing is enabled. */
    105 Bool DRD_(thread_get_trace_fork_join)(void)
    106 {
    107    return s_trace_fork_join;
    108 }
    109 
    110 /** Enables/disables fork/join tracing. */
    111 void DRD_(thread_set_trace_fork_join)(const Bool t)
    112 {
    113    tl_assert(t == False || t == True);
    114    s_trace_fork_join = t;
    115 }
    116 
    117 /** Enables/disables segment merging. */
    118 void DRD_(thread_set_segment_merging)(const Bool m)
    119 {
    120    tl_assert(m == False || m == True);
    121    s_segment_merging = m;
    122 }
    123 
    124 /** Get the segment merging interval. */
    125 int DRD_(thread_get_segment_merge_interval)(void)
    126 {
    127    return s_segment_merge_interval;
    128 }
    129 
    130 /** Set the segment merging interval. */
    131 void DRD_(thread_set_segment_merge_interval)(const int i)
    132 {
    133    s_segment_merge_interval = i;
    134 }
    135 
    136 /**
    137  * Convert Valgrind's ThreadId into a DrdThreadId.
    138  *
    139  * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
    140  *         Valgrind ThreadId does not yet exist.
    141  */
    142 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
    143 {
    144    int i;
    145 
    146    if (tid == VG_INVALID_THREADID)
    147       return DRD_INVALID_THREADID;
    148 
    149    for (i = 1; i < DRD_N_THREADS; i++)
    150    {
    151       if (DRD_(g_threadinfo)[i].vg_thread_exists == True
    152           && DRD_(g_threadinfo)[i].vg_threadid == tid)
    153       {
    154          return i;
    155       }
    156    }
    157 
    158    return DRD_INVALID_THREADID;
    159 }
    160 
    161 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
    162 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
    163 {
    164    int i;
    165 
    166    tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
    167 
    168    for (i = 1; i < DRD_N_THREADS; i++)
    169    {
    170       if (DRD_(g_threadinfo)[i].vg_thread_exists == False
    171           && DRD_(g_threadinfo)[i].posix_thread_exists == False
    172           && DRD_(g_threadinfo)[i].detached_posix_thread == False)
    173       {
    174          tl_assert(! DRD_(IsValidDrdThreadId)(i));
    175 
    176          DRD_(g_threadinfo)[i].vg_thread_exists = True;
    177          DRD_(g_threadinfo)[i].vg_threadid   = tid;
    178          DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
    179          DRD_(g_threadinfo)[i].stack_min     = 0;
    180          DRD_(g_threadinfo)[i].stack_min_min = 0;
    181          DRD_(g_threadinfo)[i].stack_startup = 0;
    182          DRD_(g_threadinfo)[i].stack_max     = 0;
    183          DRD_(thread_set_name)(i, "");
    184          DRD_(g_threadinfo)[i].on_alt_stack        = False;
    185          DRD_(g_threadinfo)[i].is_recording_loads  = True;
    186          DRD_(g_threadinfo)[i].is_recording_stores = True;
    187          DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
    188          DRD_(g_threadinfo)[i].synchr_nesting = 0;
    189          tl_assert(DRD_(g_threadinfo)[i].first == 0);
    190          tl_assert(DRD_(g_threadinfo)[i].last == 0);
    191 
    192          tl_assert(DRD_(IsValidDrdThreadId)(i));
    193 
    194          return i;
    195       }
    196    }
    197 
    198    VG_(printf)(
    199 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
    200 "Aborting.\n");
    201 
    202    tl_assert(False);
    203 
    204    return DRD_INVALID_THREADID;
    205 }
    206 
    207 /** Convert a POSIX thread ID into a DRD thread ID. */
    208 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
    209 {
    210    int i;
    211 
    212    if (tid != INVALID_POSIX_THREADID)
    213    {
    214       for (i = 1; i < DRD_N_THREADS; i++)
    215       {
    216          if (DRD_(g_threadinfo)[i].posix_thread_exists
    217              && DRD_(g_threadinfo)[i].pt_threadid == tid)
    218          {
    219             return i;
    220          }
    221       }
    222    }
    223    return DRD_INVALID_THREADID;
    224 }
    225 
    226 /** Convert a DRD thread ID into a Valgrind thread ID. */
    227 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
    228 {
    229    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    230              && tid != DRD_INVALID_THREADID);
    231 
    232    return (DRD_(g_threadinfo)[tid].vg_thread_exists
    233            ? DRD_(g_threadinfo)[tid].vg_threadid
    234            : VG_INVALID_THREADID);
    235 }
    236 
    237 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    238 /**
    239  * Sanity check of the doubly linked list of segments referenced by a
    240  * ThreadInfo struct.
    241  * @return True if sane, False if not.
    242  */
    243 static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
    244 {
    245    Segment* p;
    246 
    247    for (p = ti->first; p; p = p->next) {
    248       if (p->next && p->next->prev != p)
    249          return False;
    250       if (p->next == 0 && p != ti->last)
    251          return False;
    252    }
    253    for (p = ti->last; p; p = p->prev) {
    254       if (p->prev && p->prev->next != p)
    255          return False;
    256       if (p->prev == 0 && p != ti->first)
    257          return False;
    258    }
    259    return True;
    260 }
    261 #endif
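
/*
 * Illustrative sketch (not part of DRD): the list invariant checked above,
 * which thread_append_segment() and thread_discard_segment() must preserve,
 * restated for a minimal, hypothetical doubly linked list type. The names
 * example_node, example_list and example_list_is_sane() are made up for
 * this sketch only.
 */
struct example_node { struct example_node* prev; struct example_node* next; };
struct example_list { struct example_node* first; struct example_node* last; };

static Bool example_list_is_sane(const struct example_list* const l)
{
   const struct example_node* p;

   /* Forward walk: every next/prev pair must agree, and the walk must end
      at l->last. */
   for (p = l->first; p; p = p->next) {
      if (p->next && p->next->prev != p)
         return False;
      if (p->next == 0 && p != l->last)
         return False;
   }
   /* Backward walk: the mirror image of the forward check. */
   for (p = l->last; p; p = p->prev) {
      if (p->prev && p->prev->next != p)
         return False;
      if (p->prev == 0 && p != l->first)
         return False;
   }
   return True;
}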
    262 
    263 /**
    264  * Create the first segment for a newly started thread.
    265  *
    266  * This function is called from the handler installed via
    267  * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
    268  * from the context of the creator thread, before the new thread has been
    269  * created.
    270  *
    271  * @param[in] creator    DRD thread ID of the creator thread.
    272  * @param[in] vg_created Valgrind thread ID of the created thread.
    273  *
    274  * @return DRD thread ID of the created thread.
    275  */
    276 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
    277                                     const ThreadId vg_created)
    278 {
    279    DrdThreadId created;
    280 
    281    tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
    282    created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
    283    tl_assert(0 <= (int)created && created < DRD_N_THREADS
    284              && created != DRD_INVALID_THREADID);
    285 
    286    tl_assert(DRD_(g_threadinfo)[created].first == 0);
    287    tl_assert(DRD_(g_threadinfo)[created].last == 0);
    288    /* Create an initial segment for the newly created thread. */
    289    thread_append_segment(created, DRD_(sg_new)(creator, created));
    290 
    291    return created;
    292 }
    293 
    294 /**
    295  * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
    296  * after the thread has been created and before any client instructions are run
    297  * on the newly created thread, e.g. from the handler installed via
    298  * VG_(track_pre_thread_first_insn)().
    299  *
    300  * @param[in] vg_created Valgrind thread ID of the newly created thread.
    301  *
    302  * @return DRD thread ID for the new thread.
    303  */
    304 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
    305 {
    306    const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
    307 
    308    tl_assert(0 <= (int)created && created < DRD_N_THREADS
    309              && created != DRD_INVALID_THREADID);
    310 
    311    DRD_(g_threadinfo)[created].stack_max
    312       = VG_(thread_get_stack_max)(vg_created);
    313    DRD_(g_threadinfo)[created].stack_startup
    314       = DRD_(g_threadinfo)[created].stack_max;
    315    DRD_(g_threadinfo)[created].stack_min
    316       = DRD_(g_threadinfo)[created].stack_max;
    317    DRD_(g_threadinfo)[created].stack_min_min
    318       = DRD_(g_threadinfo)[created].stack_max;
    319    DRD_(g_threadinfo)[created].stack_size
    320       = VG_(thread_get_stack_size)(vg_created);
    321    tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
    322 
    323    return created;
    324 }
    325 
    326 /**
    327  * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
    328  * after thread drd_joiner joined thread drd_joinee.
    329  */
    330 void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
    331 {
    332    tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
    333    tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
    334 
    335    DRD_(thread_new_segment)(drd_joiner);
    336    DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
    337    DRD_(thread_new_segment)(drd_joinee);
    338 
    339    if (s_trace_fork_join)
    340    {
    341       const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
    342       const unsigned msg_size = 256;
    343       char* msg;
    344 
    345       msg = VG_(malloc)("drd.main.dptj.1", msg_size);
    346       tl_assert(msg);
    347       VG_(snprintf)(msg, msg_size,
    348                     "drd_post_thread_join joiner = %d, joinee = %d",
    349                     drd_joiner, drd_joinee);
    350       if (joiner)
    351       {
    352          char* vc;
    353 
    354          vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
    355          VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
    356                        ", new vc: %s", vc);
    357          VG_(free)(vc);
    358       }
    359       VG_(message)(Vg_DebugMsg, "%s\n", msg);
    360       VG_(free)(msg);
    361    }
    362 
    363    if (!  DRD_(get_check_stack_accesses)())
    364    {
    365       DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
    366                                - DRD_(thread_get_stack_size)(drd_joinee),
    367                                DRD_(thread_get_stack_max)(drd_joinee));
    368    }
    369    DRD_(clientobj_delete_thread)(drd_joinee);
    370    DRD_(thread_delete)(drd_joinee, False);
    371 }
    372 
    373 /**
    374  * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
    375  * and accesses this data structure from multiple threads without locking.
    376  * Any conflicting accesses in the range stack_startup..stack_max will be
    377  * ignored.
    378  */
    379 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
    380                                     const Addr stack_startup)
    381 {
    382    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    383              && tid != DRD_INVALID_THREADID);
    384    tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
    385    tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
    386    DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
    387 }
    388 
    389 /** Return the stack pointer for the specified thread. */
    390 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
    391 {
    392    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    393              && tid != DRD_INVALID_THREADID);
    394    return DRD_(g_threadinfo)[tid].stack_min;
    395 }
    396 
    397 /**
    398  * Return the lowest value that was ever assigned to the stack pointer
    399  * for the specified thread.
    400  */
    401 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
    402 {
    403    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    404              && tid != DRD_INVALID_THREADID);
    405    return DRD_(g_threadinfo)[tid].stack_min_min;
    406 }
    407 
    408 /** Return the top address for the stack of the specified thread. */
    409 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
    410 {
    411    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    412              && tid != DRD_INVALID_THREADID);
    413    return DRD_(g_threadinfo)[tid].stack_max;
    414 }
    415 
    416 /** Return the maximum stack size for the specified thread. */
    417 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
    418 {
    419    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    420              && tid != DRD_INVALID_THREADID);
    421    return DRD_(g_threadinfo)[tid].stack_size;
    422 }
    423 
    424 Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
    425 {
    426    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    427              && tid != DRD_INVALID_THREADID);
    428    return DRD_(g_threadinfo)[tid].on_alt_stack;
    429 }
    430 
    431 void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
    432                                    const Bool on_alt_stack)
    433 {
    434    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    435              && tid != DRD_INVALID_THREADID);
    436    tl_assert(on_alt_stack == !!on_alt_stack);
    437    DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
    438 }
    439 
    440 Int DRD_(thread_get_threads_on_alt_stack)(void)
    441 {
    442    int i, n = 0;
    443 
    444    for (i = 1; i < DRD_N_THREADS; i++)
    445       n += DRD_(g_threadinfo)[i].on_alt_stack;
    446    return n;
    447 }
    448 
    449 /**
    450  * Clean up thread-specific data structures. Call this just after
    451  * pthread_join().
    452  */
    453 void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
    454 {
    455    Segment* sg;
    456    Segment* sg_prev;
    457 
    458    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    459 
    460    tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
    461    for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
    462    {
    463       sg_prev = sg->prev;
    464       sg->prev = 0;
    465       sg->next = 0;
    466       DRD_(sg_put)(sg);
    467    }
    468    DRD_(g_threadinfo)[tid].vg_thread_exists = False;
    469    DRD_(g_threadinfo)[tid].posix_thread_exists = False;
    470    if (detached)
    471       DRD_(g_threadinfo)[tid].detached_posix_thread = False;
    472    else
    473       tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
    474    DRD_(g_threadinfo)[tid].first = 0;
    475    DRD_(g_threadinfo)[tid].last = 0;
    476 
    477    tl_assert(! DRD_(IsValidDrdThreadId)(tid));
    478 }
    479 
    480 /**
    481  * Called after a thread performed its last memory access and before
    482  * thread_delete() is called. Note: thread_delete() is only called for
    483  * joinable threads, not for detached threads.
    484  */
    485 void DRD_(thread_finished)(const DrdThreadId tid)
    486 {
    487    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    488              && tid != DRD_INVALID_THREADID);
    489 
    490    DRD_(g_threadinfo)[tid].vg_thread_exists = False;
    491 
    492    if (DRD_(g_threadinfo)[tid].detached_posix_thread)
    493    {
    494       /*
    495        * Once a detached thread has finished, its stack is deallocated and
    496        * should no longer be taken into account when computing the conflict set.
    497        */
    498       DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
    499 
    500       /*
    501        * For a detached thread, calling pthread_exit() invalidates the
    502        * POSIX thread ID associated with the detached thread. For joinable
    503        * POSIX threads however, the POSIX thread ID remains live after the
    504        * pthread_exit() call until pthread_join() is called.
    505        */
    506       DRD_(g_threadinfo)[tid].posix_thread_exists = False;
    507    }
    508 }
    509 
    510 /** Called just after fork() in the child process. */
    511 void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
    512 {
    513    unsigned i;
    514 
    515    for (i = 1; i < DRD_N_THREADS; i++)
    516    {
    517       if (i == tid)
    518 	 continue;
    519       if (DRD_(IsValidDrdThreadId(i)))
    520 	 DRD_(thread_delete)(i, True);
    521       tl_assert(!DRD_(IsValidDrdThreadId(i)));
    522    }
    523 }
    524 
    525 /** Called just before pthread_cancel(). */
    526 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
    527 {
    528    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    529              && tid != DRD_INVALID_THREADID);
    530    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    531 
    532    DRD_(g_threadinfo)[tid].synchr_nesting = 0;
    533 }
    534 
    535 /**
    536  * Store the POSIX thread ID for the specified thread.
    537  *
    538  * @note This function can be called two times for the same thread -- see also
    539  * the comment block preceding the pthread_create() wrapper in
    540  * drd_pthread_intercepts.c.
    541  */
    542 void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
    543 {
    544    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    545              && tid != DRD_INVALID_THREADID);
    546    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
    547              || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
    548    tl_assert(ptid != INVALID_POSIX_THREADID);
    549    DRD_(g_threadinfo)[tid].posix_thread_exists = True;
    550    DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
    551 }
    552 
    553 /** Returns true for joinable threads and false for detached threads. */
    554 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
    555 {
    556    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    557              && tid != DRD_INVALID_THREADID);
    558    return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
    559 }
    560 
    561 /** Store the thread mode: joinable or detached. */
    562 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
    563 {
    564    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    565              && tid != DRD_INVALID_THREADID);
    566    tl_assert(!! joinable == joinable);
    567    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    568 
    569    DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
    570 }
    571 
    572 /** Tells DRD that the calling thread is about to enter pthread_create(). */
    573 void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
    574 {
    575    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    576              && tid != DRD_INVALID_THREADID);
    577    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    578    tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
    579 
    580    DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
    581 }
    582 
    583 /** Tells DRD that the calling thread has left pthread_create(). */
    584 void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
    585 {
    586    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    587              && tid != DRD_INVALID_THREADID);
    588    tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
    589    tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
    590 
    591    DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
    592 }
    593 
    594 /** Obtain the thread number and the user-assigned thread name. */
    595 const char* DRD_(thread_get_name)(const DrdThreadId tid)
    596 {
    597    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    598              && tid != DRD_INVALID_THREADID);
    599 
    600    return DRD_(g_threadinfo)[tid].name;
    601 }
    602 
    603 /** Set the name of the specified thread. */
    604 void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
    605 {
    606    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    607              && tid != DRD_INVALID_THREADID);
    608 
    609    if (name == NULL || name[0] == 0)
    610       VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
    611                     sizeof(DRD_(g_threadinfo)[tid].name),
    612                     "Thread %d",
    613                     tid);
    614    else
    615       VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
    616                     sizeof(DRD_(g_threadinfo)[tid].name),
    617                     "Thread %d (%s)",
    618                     tid, name);
    619    DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
    620 }
    621 
    622 /**
    623  * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
    624  * conflict set.
    625  */
    626 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
    627 {
    628    tl_assert(vg_tid != VG_INVALID_THREADID);
    629 
    630    if (vg_tid != s_vg_running_tid)
    631    {
    632       DRD_(thread_set_running_tid)(vg_tid,
    633                                    DRD_(VgThreadIdToDrdThreadId)(vg_tid));
    634    }
    635 
    636    tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
    637    tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
    638 }
    639 
    640 /**
    641  * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
    642  * conflict set.
    643  */
    644 void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
    645                                   const DrdThreadId drd_tid)
    646 {
    647    tl_assert(vg_tid != VG_INVALID_THREADID);
    648    tl_assert(drd_tid != DRD_INVALID_THREADID);
    649 
    650    if (vg_tid != s_vg_running_tid)
    651    {
    652       if (s_trace_context_switches
    653           && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
    654       {
    655          VG_(message)(Vg_DebugMsg,
    656                       "Context switch from thread %d to thread %d;"
    657                       " segments: %llu\n",
    658                       DRD_(g_drd_running_tid), drd_tid,
    659                       DRD_(sg_get_segments_alive_count)());
    660       }
    661       s_vg_running_tid = vg_tid;
    662       DRD_(g_drd_running_tid) = drd_tid;
    663       thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
    664       s_context_switch_count++;
    665    }
    666 
    667    tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
    668    tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
    669 }
    670 
    671 /**
    672  * Increase the synchronization nesting counter. Must be called before the
    673  * client calls a synchronization function.
    674  */
    675 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
    676 {
    677    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    678    return DRD_(g_threadinfo)[tid].synchr_nesting++;
    679 }
    680 
    681 /**
    682  * Decrease the synchronization nesting counter. Must be called after the
    683  * client left a synchronization function.
    684  */
    685 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
    686 {
    687    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    688    tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
    689    return --DRD_(g_threadinfo)[tid].synchr_nesting;
    690 }
    691 
    692 /** Returns the synchronization nesting counter. */
    693 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
    694 {
    695    tl_assert(DRD_(IsValidDrdThreadId)(tid));
    696    return DRD_(g_threadinfo)[tid].synchr_nesting;
    697 }
    698 
    699 /** Append a new segment at the end of the segment list. */
    700 static
    701 void thread_append_segment(const DrdThreadId tid, Segment* const sg)
    702 {
    703    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    704              && tid != DRD_INVALID_THREADID);
    705 
    706 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    707    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    708 #endif
    709 
    710    sg->prev = DRD_(g_threadinfo)[tid].last;
    711    sg->next = 0;
    712    if (DRD_(g_threadinfo)[tid].last)
    713       DRD_(g_threadinfo)[tid].last->next = sg;
    714    DRD_(g_threadinfo)[tid].last = sg;
    715    if (DRD_(g_threadinfo)[tid].first == 0)
    716       DRD_(g_threadinfo)[tid].first = sg;
    717 
    718 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    719    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    720 #endif
    721 }
    722 
    723 /**
     724  * Remove a segment from the segment list of thread tid, and free the
    725  * associated memory.
    726  */
    727 static
    728 void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
    729 {
    730    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    731              && tid != DRD_INVALID_THREADID);
    732 
    733 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    734    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    735 #endif
    736 
    737    if (sg->prev)
    738       sg->prev->next = sg->next;
    739    if (sg->next)
    740       sg->next->prev = sg->prev;
    741    if (sg == DRD_(g_threadinfo)[tid].first)
    742       DRD_(g_threadinfo)[tid].first = sg->next;
    743    if (sg == DRD_(g_threadinfo)[tid].last)
    744       DRD_(g_threadinfo)[tid].last = sg->prev;
    745    DRD_(sg_put)(sg);
    746 
    747 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    748    tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
    749 #endif
    750 }
    751 
    752 /**
    753  * Returns a pointer to the vector clock of the most recent segment associated
    754  * with thread 'tid'.
    755  */
    756 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
    757 {
    758    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    759              && tid != DRD_INVALID_THREADID);
    760    tl_assert(DRD_(g_threadinfo)[tid].last);
    761    return &DRD_(g_threadinfo)[tid].last->vc;
    762 }
    763 
    764 /**
    765  * Return the latest segment of thread 'tid' and increment its reference count.
    766  */
    767 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
    768 {
    769    tl_assert(sg);
    770    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
    771              && tid != DRD_INVALID_THREADID);
    772    tl_assert(DRD_(g_threadinfo)[tid].last);
    773 
    774    DRD_(sg_put)(*sg);
    775    *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
    776 }
    777 
    778 /**
    779  * Compute the minimum of all latest vector clocks of all threads
    780  * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
    781  *
    782  * @param vc pointer to a vectorclock, holds result upon return.
    783  */
    784 static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
    785 {
    786    unsigned i;
    787    Bool first;
    788    Segment* latest_sg;
    789 
    790    first = True;
    791    for (i = 0; i < DRD_N_THREADS; i++)
    792    {
    793       latest_sg = DRD_(g_threadinfo)[i].last;
    794       if (latest_sg)
    795       {
    796          if (first)
    797             DRD_(vc_assign)(vc, &latest_sg->vc);
    798          else
    799             DRD_(vc_min)(vc, &latest_sg->vc);
    800          first = False;
    801       }
    802    }
    803 }
    804 
    805 /**
    806  * Compute the maximum of all latest vector clocks of all threads.
    807  *
    808  * @param vc pointer to a vectorclock, holds result upon return.
    809  */
    810 static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
    811 {
    812    unsigned i;
    813    Bool first;
    814    Segment* latest_sg;
    815 
    816    first = True;
    817    for (i = 0; i < DRD_N_THREADS; i++)
    818    {
    819       latest_sg = DRD_(g_threadinfo)[i].last;
    820       if (latest_sg)
    821       {
    822          if (first)
    823             DRD_(vc_assign)(vc, &latest_sg->vc);
    824          else
    825             DRD_(vc_combine)(vc, &latest_sg->vc);
    826          first = False;
    827       }
    828    }
    829 }
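
/*
 * Illustrative sketch (not part of DRD): what the vector clock primitives
 * used above compute, written out on plain fixed-size arrays. The real
 * implementation lives in drd_vc.c and handles clocks with differing sets
 * of entries; EXAMPLE_VC_LEN, example_vc_lte(), example_vc_min() and
 * example_vc_combine() are assumptions made for this sketch only.
 */
#define EXAMPLE_VC_LEN 4

/* "Ordered before or equal": a[i] <= b[i] for every thread index i. */
static Bool example_vc_lte(const UInt a[EXAMPLE_VC_LEN],
                           const UInt b[EXAMPLE_VC_LEN])
{
   unsigned i;
   for (i = 0; i < EXAMPLE_VC_LEN; i++)
      if (a[i] > b[i])
         return False;
   return True;
}

/* Element-wise minimum over equal-length clocks (cf. the minimum computed
   by DRD_(thread_compute_minimum_vc)()). */
static void example_vc_min(UInt acc[EXAMPLE_VC_LEN],
                           const UInt vc[EXAMPLE_VC_LEN])
{
   unsigned i;
   for (i = 0; i < EXAMPLE_VC_LEN; i++)
      if (vc[i] < acc[i])
         acc[i] = vc[i];
}

/* Element-wise maximum over equal-length clocks (cf. the maximum computed
   by DRD_(thread_compute_maximum_vc)()). */
static void example_vc_combine(UInt acc[EXAMPLE_VC_LEN],
                               const UInt vc[EXAMPLE_VC_LEN])
{
   unsigned i;
   for (i = 0; i < EXAMPLE_VC_LEN; i++)
      if (vc[i] > acc[i])
         acc[i] = vc[i];
}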
    830 
    831 /**
     832  * Discard all segments that are ordered before the latest vector clock of
     833  * every thread -- these segments can no longer be involved in a
    834  * data race.
    835  */
    836 static void thread_discard_ordered_segments(void)
    837 {
    838    unsigned i;
    839    VectorClock thread_vc_min;
    840 
    841    s_discard_ordered_segments_count++;
    842 
    843    DRD_(vc_init)(&thread_vc_min, 0, 0);
    844    DRD_(thread_compute_minimum_vc)(&thread_vc_min);
    845    if (DRD_(sg_get_trace)())
    846    {
    847       char *vc_min, *vc_max;
    848       VectorClock thread_vc_max;
    849 
    850       DRD_(vc_init)(&thread_vc_max, 0, 0);
    851       DRD_(thread_compute_maximum_vc)(&thread_vc_max);
    852       vc_min = DRD_(vc_aprint)(&thread_vc_min);
    853       vc_max = DRD_(vc_aprint)(&thread_vc_max);
    854       VG_(message)(Vg_DebugMsg,
    855                    "Discarding ordered segments -- min vc is %s, max vc is %s\n",
    856                    vc_min, vc_max);
    857       VG_(free)(vc_min);
    858       VG_(free)(vc_max);
    859       DRD_(vc_cleanup)(&thread_vc_max);
    860    }
    861 
    862    for (i = 0; i < DRD_N_THREADS; i++)
    863    {
    864       Segment* sg;
    865       Segment* sg_next;
    866       for (sg = DRD_(g_threadinfo)[i].first;
    867            sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
    868            sg = sg_next)
    869       {
    870          thread_discard_segment(i, sg);
    871       }
    872    }
    873    DRD_(vc_cleanup)(&thread_vc_min);
    874 }
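
/*
 * Worked example (illustrative only): suppose thread 1's latest segment has
 * vector clock [3,1] and thread 2's latest segment has vector clock [2,4].
 * The element-wise minimum is [2,1]. An older segment with vector clock
 * [1,1] is ordered before [2,1] and can therefore no longer participate in
 * a race, so it is discarded; a segment with vector clock [3,0] is not
 * ordered before [2,1] and is kept.
 */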
    875 
    876 /**
    877  * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
     878  * by Mark Christiaens et al. The property equiv(sg1, sg2) holds if and only if
    879  * all segments in the set CS are ordered consistently against both sg1 and
    880  * sg2. The set CS is defined as the set of segments that can immediately
    881  * precede future segments via inter-thread synchronization operations. In
    882  * DRD the set CS consists of the latest segment of each thread combined with
    883  * all segments for which the reference count is strictly greater than one.
    884  * The code below is an optimized version of the following:
    885  *
    886  * for (i = 0; i < DRD_N_THREADS; i++)
    887  * {
    888  *    Segment* sg;
    889  *
    890  *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
    891  *    {
    892  *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
    893  *       {
    894  *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
    895  *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
    896  *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
    897  *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
    898  *          {
    899  *             return False;
    900  *          }
    901  *       }
    902  *    }
    903  * }
    904  */
    905 static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
    906                                                Segment* const sg1,
    907                                                Segment* const sg2)
    908 {
    909    unsigned i;
    910 
    911    tl_assert(sg1->next);
    912    tl_assert(sg2->next);
    913    tl_assert(sg1->next == sg2);
    914    tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
    915 
    916    for (i = 0; i < DRD_N_THREADS; i++)
    917    {
    918       Segment* sg;
    919 
    920       for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
    921       {
    922          if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
    923          {
    924             if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
    925                break;
    926             if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
    927                return False;
    928          }
    929       }
    930       for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
    931       {
    932          if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
    933          {
    934             if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
    935                break;
    936             if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
    937                return False;
    938          }
    939       }
    940    }
    941    return True;
    942 }
    943 
    944 /**
    945  * Merge all segments that may be merged without triggering false positives
    946  * or discarding real data races. For the theoretical background of segment
    947  * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
    948  * and Koen De Bosschere. Bounding the number of segment histories during
     949  * data race detection. Parallel Computing, Volume 28, Issue 9,
     950  * pp 1221-1238, September 2002. This paper proves that consecutive
     951  * segments for which the property equiv(s1, s2) holds can be merged
     952  * without reducing the accuracy of data race detection. Furthermore,
     953  * it proves that the total number of segments remains bounded if
     954  * all segments s1, s2 for which equiv(s1, s2) holds are merged
    955  * every time a new segment is created. The property equiv(s1, s2) is defined
    956  * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
    957  * clocks of segments s and s1 are ordered in the same way as those of segments
    958  * s and s2. The set CS is defined as the set of existing segments s that have
    959  * the potential to conflict with not yet created segments, either because the
    960  * segment s is the latest segment of a thread or because it can become the
    961  * immediate predecessor of a new segment due to a synchronization operation.
    962  */
    963 static void thread_merge_segments(void)
    964 {
    965    unsigned i;
    966 
    967    s_new_segments_since_last_merge = 0;
    968 
    969    for (i = 0; i < DRD_N_THREADS; i++)
    970    {
    971       Segment* sg;
    972 
    973 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    974       tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
    975 #endif
    976 
    977       for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
    978       {
    979          if (DRD_(sg_get_refcnt)(sg) == 1
    980              && sg->next
    981              && DRD_(sg_get_refcnt)(sg->next) == 1
    982              && sg->next->next
    983              && thread_consistent_segment_ordering(i, sg, sg->next))
    984          {
    985             /* Merge sg and sg->next into sg. */
    986             DRD_(sg_merge)(sg, sg->next);
    987             thread_discard_segment(i, sg->next);
    988          }
    989       }
    990 
    991 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
    992       tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
    993 #endif
    994    }
    995 }
    996 
    997 /**
    998  * Create a new segment for the specified thread, and discard any segments
    999  * that cannot cause races anymore.
   1000  */
   1001 void DRD_(thread_new_segment)(const DrdThreadId tid)
   1002 {
   1003    Segment* last_sg;
   1004    Segment* new_sg;
   1005 
   1006    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1007              && tid != DRD_INVALID_THREADID);
   1008    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1009 
   1010    last_sg = DRD_(g_threadinfo)[tid].last;
   1011    new_sg = DRD_(sg_new)(tid, tid);
   1012    thread_append_segment(tid, new_sg);
   1013    if (tid == DRD_(g_drd_running_tid) && last_sg)
   1014    {
   1015       DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
   1016       s_update_conflict_set_new_sg_count++;
   1017    }
   1018 
   1019    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1020 
   1021    if (s_segment_merging
   1022        && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   1023    {
   1024       thread_discard_ordered_segments();
   1025       thread_merge_segments();
   1026    }
   1027 }
   1028 
   1029 /** Call this function after thread 'joiner' joined thread 'joinee'. */
   1030 void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
   1031 {
   1032    tl_assert(joiner != joinee);
   1033    tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
   1034              && joiner != DRD_INVALID_THREADID);
   1035    tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
   1036              && joinee != DRD_INVALID_THREADID);
   1037    tl_assert(DRD_(g_threadinfo)[joiner].last);
   1038    tl_assert(DRD_(g_threadinfo)[joinee].last);
   1039 
   1040    if (DRD_(sg_get_trace)())
   1041    {
   1042       char *str1, *str2;
   1043       str1 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
   1044       str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joinee].last->vc);
   1045       VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
   1046                    str1, str2);
   1047       VG_(free)(str1);
   1048       VG_(free)(str2);
   1049    }
   1050    if (joiner == DRD_(g_drd_running_tid))
   1051    {
   1052       VectorClock old_vc;
   1053 
   1054       DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[joiner].last->vc);
   1055       DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
   1056                        &DRD_(g_threadinfo)[joinee].last->vc);
   1057       DRD_(thread_update_conflict_set)(joiner, &old_vc);
   1058       s_update_conflict_set_join_count++;
   1059       DRD_(vc_cleanup)(&old_vc);
   1060    }
   1061    else
   1062    {
   1063       DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
   1064                        &DRD_(g_threadinfo)[joinee].last->vc);
   1065    }
   1066 
   1067    thread_discard_ordered_segments();
   1068 
   1069    if (DRD_(sg_get_trace)())
   1070    {
   1071       char* str;
   1072       str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
   1073       VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
   1074       VG_(free)(str);
   1075    }
   1076 }
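
/*
 * Worked example (illustrative only): if the joiner's latest segment has
 * vector clock [5,0,2] and the joinee's has [1,7,2], the DRD_(vc_combine)()
 * call above advances the joiner to [5,7,2], the element-wise maximum. Every
 * access performed by the joinee is thereby ordered before any access the
 * joiner performs after the join, which is the happens-before edge that
 * pthread_join() establishes.
 */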
   1077 
   1078 /**
   1079  * Update the vector clock of the last segment of thread tid with the
    1080  * vector clock of segment sg.
   1081  */
   1082 static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
   1083 {
   1084    const VectorClock* const vc = &sg->vc;
   1085 
   1086    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1087              && tid != DRD_INVALID_THREADID);
   1088    tl_assert(DRD_(g_threadinfo)[tid].last);
   1089    tl_assert(sg);
   1090    tl_assert(vc);
   1091 
   1092    if (tid != sg->tid)
   1093    {
   1094       VectorClock old_vc;
   1095 
   1096       DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
   1097       DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
   1098       if (DRD_(sg_get_trace)())
   1099       {
   1100          char *str1, *str2;
   1101          str1 = DRD_(vc_aprint)(&old_vc);
   1102          str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
   1103          VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
   1104          VG_(free)(str1);
   1105          VG_(free)(str2);
   1106       }
   1107 
   1108       thread_discard_ordered_segments();
   1109 
   1110       DRD_(thread_update_conflict_set)(tid, &old_vc);
   1111       s_update_conflict_set_sync_count++;
   1112 
   1113       DRD_(vc_cleanup)(&old_vc);
   1114    }
   1115    else
   1116    {
   1117       tl_assert(DRD_(vc_lte)(vc, &DRD_(g_threadinfo)[tid].last->vc));
   1118    }
   1119 }
   1120 
   1121 /**
   1122  * Create a new segment for thread tid and update the vector clock of the last
    1123  * segment of this thread with the vector clock of segment sg. Call this
    1124  * function after a synchronization operation forced thread tid to wait
    1125  * until the memory accesses in segment sg had finished.
   1126  */
   1127 void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
   1128 {
   1129    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1130              && tid != DRD_INVALID_THREADID);
   1131    tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   1132    tl_assert(sg);
   1133 
   1134    thread_append_segment(tid, DRD_(sg_new)(tid, tid));
   1135 
   1136    thread_combine_vc_sync(tid, sg);
   1137 
   1138    if (s_segment_merging
   1139        && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   1140    {
   1141       thread_discard_ordered_segments();
   1142       thread_merge_segments();
   1143    }
   1144 }
   1145 
   1146 /**
   1147  * Call this function whenever a thread is no longer using the memory
   1148  * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
   1149  * increase.
   1150  */
   1151 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2,
   1152                                  const Bool dont_clear_access)
   1153 {
   1154    DrdThreadId other_user;
   1155    unsigned i;
   1156 
   1157    /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
   1158    other_user = DRD_INVALID_THREADID;
   1159    for (i = 0; i < DRD_N_THREADS; i++)
   1160    {
   1161       Segment* p;
   1162       for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
   1163       {
   1164          if (other_user == DRD_INVALID_THREADID
   1165              && i != DRD_(g_drd_running_tid))
   1166          {
   1167             if (UNLIKELY((!dont_clear_access
   1168                           && DRD_(bm_test_and_clear)(DRD_(sg_bm)(p), a1, a2))
   1169                          || (dont_clear_access
   1170                              && DRD_(bm_has_any_access)(DRD_(sg_bm)(p), a1, a2))
   1171                          ))
   1172             {
   1173                other_user = i;
   1174             }
   1175             continue;
   1176          }
   1177          if (!dont_clear_access)
   1178             DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
   1179       }
   1180    }
   1181 
   1182    /*
   1183     * If any other thread had accessed memory in [ a1, a2 [, update the
   1184     * conflict set.
   1185     */
   1186    if (other_user != DRD_INVALID_THREADID
   1187        && DRD_(bm_has_any_access)(DRD_(g_conflict_set), a1, a2))
   1188    {
   1189       thread_compute_conflict_set(&DRD_(g_conflict_set),
   1190                                   DRD_(thread_get_running_tid)());
   1191    }
   1192 }
   1193 
   1194 /** Specify whether memory loads should be recorded. */
   1195 void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
   1196 {
   1197    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1198              && tid != DRD_INVALID_THREADID);
   1199    tl_assert(enabled == !! enabled);
   1200 
   1201    DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
   1202 }
   1203 
   1204 /** Specify whether memory stores should be recorded. */
   1205 void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
   1206 {
   1207    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1208              && tid != DRD_INVALID_THREADID);
   1209    tl_assert(enabled == !! enabled);
   1210 
   1211    DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
   1212 }
   1213 
   1214 /**
   1215  * Print the segment information for all threads.
   1216  *
   1217  * This function is only used for debugging purposes.
   1218  */
   1219 void DRD_(thread_print_all)(void)
   1220 {
   1221    unsigned i;
   1222    Segment* p;
   1223 
   1224    for (i = 0; i < DRD_N_THREADS; i++)
   1225    {
   1226       if (DRD_(g_threadinfo)[i].first)
   1227       {
   1228          VG_(printf)("**************\n"
   1229                      "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
   1230                      "**************\n",
   1231                      i,
   1232                      DRD_(g_threadinfo)[i].vg_thread_exists,
   1233                      DRD_(g_threadinfo)[i].vg_threadid,
   1234                      DRD_(g_threadinfo)[i].posix_thread_exists,
   1235                      DRD_(g_threadinfo)[i].pt_threadid,
   1236                      DRD_(g_threadinfo)[i].detached_posix_thread);
   1237          for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
   1238          {
   1239             DRD_(sg_print)(p);
   1240          }
   1241       }
   1242    }
   1243 }
   1244 
   1245 /** Show a call stack involved in a data race. */
   1246 static void show_call_stack(const DrdThreadId tid,
   1247                             const Char* const msg,
   1248                             ExeContext* const callstack)
   1249 {
   1250    const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
   1251 
   1252    VG_(message)(Vg_UserMsg, "%s (thread %d)\n", msg, tid);
   1253 
   1254    if (vg_tid != VG_INVALID_THREADID)
   1255    {
   1256       if (callstack)
   1257       {
   1258          VG_(pp_ExeContext)(callstack);
   1259       }
   1260       else
   1261       {
   1262          VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   1263       }
   1264    }
   1265    else
   1266    {
   1267       VG_(message)(Vg_UserMsg,
   1268                    "   (thread finished, call stack no longer available)\n");
   1269    }
   1270 }
   1271 
   1272 /** Print information about the segments involved in a data race. */
   1273 static void
   1274 thread_report_conflicting_segments_segment(const DrdThreadId tid,
   1275                                            const Addr addr,
   1276                                            const SizeT size,
   1277                                            const BmAccessTypeT access_type,
   1278                                            const Segment* const p)
   1279 {
   1280    unsigned i;
   1281 
   1282    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1283              && tid != DRD_INVALID_THREADID);
   1284    tl_assert(p);
   1285 
   1286    for (i = 0; i < DRD_N_THREADS; i++)
   1287    {
   1288       if (i != tid)
   1289       {
   1290          Segment* q;
   1291          for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
   1292          {
   1293             /*
   1294              * Since q iterates over the segments of thread i in order of
   1295              * decreasing vector clocks, if q->vc <= p->vc, then
    1296              * q->prev->vc <= p->vc will also hold. Hence, break out of the
   1297              * loop once this condition is met.
   1298              */
   1299             if (DRD_(vc_lte)(&q->vc, &p->vc))
   1300                break;
   1301             if (! DRD_(vc_lte)(&p->vc, &q->vc))
   1302             {
   1303                if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
   1304                                               access_type))
   1305                {
   1306                   tl_assert(q->stacktrace);
   1307                   show_call_stack(i,        "Other segment start",
   1308                                   q->stacktrace);
   1309                   show_call_stack(i,        "Other segment end",
   1310                                   q->next ? q->next->stacktrace : 0);
   1311                }
   1312             }
   1313          }
   1314       }
   1315    }
   1316 }
   1317 
   1318 /** Print information about all segments involved in a data race. */
   1319 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
   1320                                               const Addr addr,
   1321                                               const SizeT size,
   1322                                               const BmAccessTypeT access_type)
   1323 {
   1324    Segment* p;
   1325 
   1326    tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
   1327              && tid != DRD_INVALID_THREADID);
   1328 
   1329    for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
   1330    {
   1331       if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
   1332       {
   1333          thread_report_conflicting_segments_segment(tid, addr, size,
   1334                                                     access_type, p);
   1335       }
   1336    }
   1337 }
   1338 
   1339 /**
   1340  * Verify whether the conflict set for thread tid is up to date. Only perform
   1341  * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
   1342  */
   1343 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
   1344 {
   1345    static int do_verify_conflict_set = -1;
   1346    Bool result;
   1347    struct bitmap* computed_conflict_set = 0;
   1348 
   1349    if (do_verify_conflict_set < 0)
   1350       do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
   1351 
   1352    if (do_verify_conflict_set == 0)
   1353       return True;
   1354 
   1355    thread_compute_conflict_set(&computed_conflict_set, tid);
   1356    result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   1357    if (! result)
   1358    {
   1359       VG_(printf)("actual conflict set:\n");
   1360       DRD_(bm_print)(DRD_(g_conflict_set));
   1361       VG_(printf)("\n");
   1362       VG_(printf)("computed conflict set:\n");
   1363       DRD_(bm_print)(computed_conflict_set);
   1364       VG_(printf)("\n");
   1365    }
   1366    DRD_(bm_delete)(computed_conflict_set);
   1367    return result;
   1368 }
   1369 
   1370 /**
   1371  * Compute the conflict set: a bitmap that represents the union of all memory
    1372  * accesses of all segments that are unordered with respect to the latest
    1373  * segment of thread tid.
   1374  */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   if (*conflict_set)
   {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   }
   else
   {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set)
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   p = DRD_(g_threadinfo)[tid].last;
   {
      unsigned j;

      if (s_trace_conflict_set)
      {
         char* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++)
      {
         if (j != tid && DRD_(IsValidDrdThreadId)(j))
         {
            Segment* q;
            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
            {
               if (! DRD_(vc_lte)(&q->vc, &p->vc)
                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               }
               else
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}
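
/*
 * Illustrative sketch (editorial, not part of DRD): a segment q of another
 * thread is "unordered" with the current segment p, and hence contributes to
 * the conflict set, iff neither vector clock happens-before the other. With
 * two threads and hypothetical clock values
 *
 *    p->vc = [ 2, 1 ]   (current segment of thread tid)
 *    q->vc = [ 1, 3 ]   (a segment of another thread)
 *
 * neither DRD_(vc_lte)(&q->vc, &p->vc) nor DRD_(vc_lte)(&p->vc, &q->vc)
 * holds (2 > 1 in the first component, 3 > 1 in the second), so the accesses
 * recorded in q's bitmap are merged into the conflict set. With
 * q->vc = [ 1, 1 ] instead, DRD_(vc_lte)(&q->vc, &p->vc) would hold and q
 * would be ignored.
 */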

/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set)
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = &DRD_(g_threadinfo)[tid].last->vc;

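   /*
    * Editorial note on the incremental update below, based on the apparent
    * semantics of the bm_*mark*() helpers: first unmark the whole conflict
    * set, then mark the address ranges of every segment whose "concurrent
    * with the current segment" status differs between old_vc and new_vc.
    */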
   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
      {
         const int included_in_old_conflict_set
            = ! DRD_(vc_lte)(&q->vc, old_vc)
            && ! DRD_(vc_lte)(old_vc, &q->vc);
         const int included_in_new_conflict_set
            = ! DRD_(vc_lte)(&q->vc, new_vc)
            && ! DRD_(vc_lte)(new_vc, &q->vc);
         if (included_in_old_conflict_set != included_in_new_conflict_set)
         {
            if (s_trace_conflict_set)
            {
               char* str;

               str = DRD_(vc_aprint)(&q->vc);
               VG_(message)(Vg_DebugMsg,
                            "conflict set: [%d] merging segment %s\n", j, str);
               VG_(free)(str);
            }
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
         else
         {
            if (s_trace_conflict_set)
            {
               char* str;

               str = DRD_(vc_aprint)(&q->vc);
               VG_(message)(Vg_DebugMsg,
                            "conflict set: [%d] ignoring segment %s\n", j, str);
               VG_(free)(str);
            }
         }
      }
   }

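   /* Editorial inference from the helper's name: clear the access
    * information stored inside the marked address ranges. */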
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

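   /*
    * Editorial note: re-merge the accesses of every segment that is still
    * concurrent with the current segment into the (marked) conflict set.
    */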
   p = DRD_(g_threadinfo)[tid].last;
   {
      for (j = 0; j < DRD_N_THREADS; j++)
      {
         if (j != tid && DRD_(IsValidDrdThreadId)(j))
         {
            Segment* q;
            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
            {
               if (! DRD_(vc_lte)(&q->vc, &p->vc)
                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
               {
                  DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
               }
            }
         }
      }
   }

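   /* Editorial inference: drop marked entries that were cleared above and
    * not repopulated by the merge loop. */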
   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}

/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}

/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}

/** Return how many times the conflict set has been recomputed from scratch. */
ULong DRD_(thread_get_compute_conflict_set_count)(void)
{
   return s_compute_conflict_set_count;
}

/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because vector clocks had to be combined due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}

/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}

/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}

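/*
 * Illustrative sketch (editorial, not part of DRD): a statistics hook run at
 * tool shutdown could report the above counters roughly as follows, assuming
 * the usual Valgrind message facilities:
 *
 *    VG_(message)(Vg_DebugMsg,
 *                 "conflict set: %llu full recomputations, "
 *                 "%llu partial updates\n",
 *                 DRD_(thread_get_compute_conflict_set_count)(),
 *                 DRD_(thread_get_update_conflict_set_count)());
 */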