/*
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (helgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.  See
   the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info (at) open-works.co.uk

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (helgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/

#ifndef __HELGRIND_H
#define __HELGRIND_H

#include "valgrind.h"

/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE, /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,    /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED1,              /* Do not use */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,   /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK  /* Addr start_of_block */

   } Vg_TCheckClientRequest;


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Implementation-only facilities.  Not for end-user use.   ---*/
/*--- For end-user facilities see below (the next section in   ---*/
/*--- this file.)                                              ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Do a client request.  These are macros rather than functions so
   as to avoid having an extra frame in stack traces.

   NB: these duplicate definitions in hg_intercepts.c.  But here, we
   have to make do with weaker typing (no definition of Word etc) and
   no assertions, whereas in hg_intercepts.c we can use those
   facilities.  Obviously it's important the two sets of definitions
   are kept in sync.

   The commented-out asserts should actually hold, but unfortunately
   they can't be allowed to be visible here, because that would
   require the end-user code to #include <assert.h>.
*/

#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _unused_res, _arg1;                       \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_unused_res, 0,         \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
   } while (0)

#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res, _arg1;                          \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_qzz_res, (_dfltF),     \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
      _resF = _qzz_res;                                  \
   } while (0)

#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _unused_res, _arg1, _arg2;                \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_unused_res, 0,         \
                                 (_creqF),               \
                                 _arg1,_arg2,0,0,0);     \
   } while (0)

#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _unused_res, _arg1, _arg2, _arg3;         \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_unused_res, 0,         \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
   } while (0)


#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Helgrind-native requests.  These allow access to         ---*/
/*--- the same set of annotation primitives that are used      ---*/
/*--- to build the POSIX pthread wrappers.                     ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,      \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,      \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
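
/* A minimal usage sketch, assuming a hypothetical user-space spinlock
   (my_lock_t and its operations are illustrative, not part of this
   header; __sync_lock_test_and_set / __sync_lock_release are GCC
   builtins):

      typedef struct { volatile int held; } my_lock_t;

      void my_lock_init(my_lock_t* l) {
         l->held = 0;
         VALGRIND_HG_MUTEX_INIT_POST(l, 0);   // 0 == non-recursive
      }
      void my_lock_acquire(my_lock_t* l) {
         VALGRIND_HG_MUTEX_LOCK_PRE(l, 0);    // 0 == blocking acquire
         while (__sync_lock_test_and_set(&l->held, 1))
            ;                                 // spin until acquired
         VALGRIND_HG_MUTEX_LOCK_POST(l);
      }
      void my_lock_release(my_lock_t* l) {
         VALGRIND_HG_MUTEX_UNLOCK_PRE(l);
         __sync_lock_release(&l->held);
         VALGRIND_HG_MUTEX_UNLOCK_POST(l);
      }
      void my_lock_destroy(my_lock_t* l) {
         VALGRIND_HG_MUTEX_DESTROY_PRE(l);
      }
*/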

/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation) */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_WAIT_POST,          \
               void*,(_sem))

/* Notify here immediately before semaphore post (a release-style
   operation) */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_POST_PRE,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))
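
/* A short sketch, assuming a hypothetical user-implemented counting
   semaphore my_sem_t (POSIX sem_t itself is already handled by the
   wrappers these requests were built for):

      my_sem_t s;
      my_sem_init(&s, 3);
      VALGRIND_HG_SEM_INIT_POST(&s, 3);   // just after creation

      VALGRIND_HG_SEM_POST_PRE(&s);       // just before the post...
      my_sem_post(&s);                    // ...a release-style op

      my_sem_wait(&s);                    // an acquire-style op...
      VALGRIND_HG_SEM_WAIT_POST(&s);      // ...notified just after

      VALGRIND_HG_SEM_DESTROY_PRE(&s);    // just before destruction
      my_sem_destroy(&s);
*/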

/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))
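
/* A usage sketch for a hypothetical user-implemented barrier
   my_barrier_t (all my_barrier_* names are illustrative):

      my_barrier_t b;
      VALGRIND_HG_BARRIER_INIT_PRE(&b, 4, 0); // capacity 4, not resizable
      my_barrier_init(&b, 4);

      // in each participating thread:
      VALGRIND_HG_BARRIER_WAIT_PRE(&b);       // just before arrival
      my_barrier_wait(&b);

      VALGRIND_HG_BARRIER_DESTROY_PRE(&b);    // just before destruction
      my_barrier_destroy(&b);
*/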

/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))
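
/* For example, a recycling allocator might paint each block as it is
   handed out, so that races against the block's previous owner are
   not misreported (pool_t and take_from_free_list are hypothetical):

      void* pool_alloc(pool_t* p, unsigned long n) {
         void* block = take_from_free_list(p, n);
         VALGRIND_HG_CLEAN_MEMORY(block, n);  // block now owned by caller
         return block;
      }
*/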

/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, a return value >= 0 indicates
   success (the block was found), -1 indicates that no block was
   found, and -2 is returned when not running on Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                            void*,(_qzz_blockstart));        \
     _npainted;                                              \
   }))
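
/* Since the macro is an expression (a GCC statement expression), the
   result can be inspected directly; a minimal sketch ('obj' is an
   illustrative pointer into a heap block):

      long painted = VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(obj);
      if (painted >= 0) {
         // success: 'painted' bytes now belong to this thread
      } else if (painted == -1) {
         // 'obj' does not point at a live heap block
      } else {
         // painted == -2: not running under Helgrind
      }
*/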

/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell Helgrind that an address range is not to be "tracked" until
   further notice.  This puts it in the NOACCESS state, in which case
   we ignore all reads and writes to it.  Useful for ignoring ranges
   of memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))
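
/* A typical bracketing pattern ('stats' is an illustrative global
   that is updated racily on purpose):

      VALGRIND_HG_DISABLE_CHECKING(&stats, sizeof stats);
      // ... intentionally racy updates to 'stats' go unreported ...
      VALGRIND_HG_ENABLE_CHECKING(&stats, sizeof stats);
*/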


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- ThreadSanitizer-compatible requests                      ---*/
/*--- (mostly unimplemented)                                   ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* A quite-broad set of annotations, as used in the ThreadSanitizer
   project.  This implementation aims to be a (source-level)
   compatible implementation of the macros defined in:

   http://code.google.com/p/data-race-test/source
          /browse/trunk/dynamic_annotations/dynamic_annotations.h

   (some of the comments below are taken from the above file)

   The implementation here is very incomplete, and intended as a
   starting point.  Many of the macros are unimplemented.  Rather
   than silently doing nothing, unimplemented macros cause an
   assertion.  The intention is to implement them on demand.

   The major use of these macros is to make visible to race detectors
   the behaviour (effects) of user-implemented synchronisation
   primitives that the detectors could not otherwise deduce from
   normal observation of pthread etc calls.

   Some of the macros are no-ops in Helgrind.  That's because Helgrind
   is a pure happens-before detector, whereas ThreadSanitizer uses a
   hybrid lockset and happens-before scheme, which requires more
   accurate annotations for correct operation.

   The macros are listed in the same order as in dynamic_annotations.h
   (URL just above).

   I should point out that I am less than clear about the intended
   semantics of quite a number of them.  Comments and clarifications
   welcomed!
*/

/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")


/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.
   If thread T1 does ANNOTATE_HAPPENS_BEFORE(obj) and later (w.r.t.
   some notional global clock for the computation) thread T2 does
   ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all memory
   accesses done by T1 before the ..BEFORE.. call as happening-before
   all memory accesses done by T2 after the ..AFTER.. call.  Hence
   Helgrind won't complain about races if T2's accesses afterwards are
   to the same locations as T1's accesses before.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on the
   signaller's side, and _AFTER just after the real sync event on the
   waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))
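
/* A minimal sketch: a lock-free handoff of one value through a shared
   flag (the names below are hypothetical; __sync_lock_test_and_set is
   a GCC builtin).  Without the annotations, Helgrind cannot see the
   synchronisation implied by the flag and would report a race on
   'msg':

      int msg;                     // payload handed from T1 to T2
      volatile int ready = 0;      // hand-rolled sync flag

      // Thread T1 (signaller):
      msg = 42;
      ANNOTATE_HAPPENS_BEFORE(&ready);       // just before the sync event
      __sync_lock_test_and_set(&ready, 1);   // the real sync event

      // Thread T2 (waiter):
      while (!ready)
         ;                                   // spin until published
      ANNOTATE_HAPPENS_AFTER(&ready);        // just after the sync event
      use(msg);                              // no race reported
*/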


/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are about
     to be published safely. The race checker will create a happens-before
     arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
     subsequent accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */


/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a posix pthread_mutex_t then Helgrind will do this anyway.
   In any case, leave as unimp for now.  I'm unsure about the intended
   behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that a new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been retrieved
     from a free list and is about to be reused, or when the locking
     discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining FIFO queues that transfer data
     between threads.

   All unimplemented.  Am not claiming to understand this (yet).
   ----------------------------------------------------------------
*/

/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")


/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronization using the other annotations, but
   these can be used when all else fails.

   Currently these are all unimplemented.  I can't think of a simple
   way to implement them without at least some performance overhead.
   ----------------------------------------------------------------
*/

/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
   point where "pointer" has been allocated, preferably close to the point
   where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(pointer, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE_SIZED")

/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racey reads, while still checking other reads and all
   writes. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)


/* ----------------------------------------------------------------
   Annotations useful for debugging.

   Again, so far unimplemented, partly for performance reasons.
   ----------------------------------------------------------------
*/

/* Request to trace every access to ADDRESS. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")


/* ----------------------------------------------------------------
   Annotations for describing behaviour of user-implemented lock
   primitives.  In all cases, the LOCK argument is a completely
   arbitrary machine word (unsigned long, or void*) and can be any
   value which gives a unique identity to the lock objects being
   modelled.

   We just pretend they're ordinary posix rwlocks.  That'll probably
   give some rather confusing wording in error messages, claiming that
   the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
   they are not.  Ah well.
   ----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK. */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
  DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,     \
               void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
  DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,     \
              void*,(lock)) /* is_w is ignored */
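
/* A usage sketch for a hypothetical user-space reader/writer lock
   my_rwl_t (all my_rwl_* names are illustrative):

      my_rwl_t rwl;
      my_rwl_init(&rwl);
      ANNOTATE_RWLOCK_CREATE(&rwl);

      my_rwl_wrlock(&rwl);                // writer side
      ANNOTATE_RWLOCK_ACQUIRED(&rwl, 1);  // 1 == writer lock
      // ... exclusive access ...
      ANNOTATE_RWLOCK_RELEASED(&rwl, 1);  // just before release
      my_rwl_wrunlock(&rwl);

      ANNOTATE_RWLOCK_DESTROY(&rwl);
      my_rwl_destroy(&rwl);
*/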


/* -------------------------------------------------------------
   Annotations useful when implementing barriers.  They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object.
   ----------------------------------------------------------------
*/

/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")

/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")

/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")

/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")


/* ----------------------------------------------------------------
   Annotations useful for testing race detectors.
   ----------------------------------------------------------------
*/

/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op.  Insert it wherever you like, to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")

/* Force the race detector to flush its state.  The actual effect
   depends on the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")

#endif /* __HELGRIND_H */