Home | History | Annotate | Download | only in helgrind
      1 /*
      2    ----------------------------------------------------------------
      3 
      4    Notice that the following BSD-style license applies to this one file
      5    (helgrind.h) only.  The entire rest of Valgrind is licensed under
      6    the terms of the GNU General Public License, version 2.  See the
      7    COPYING file in the source distribution for details.
      8 
      9    ----------------------------------------------------------------
     10 
     11    This file is part of Helgrind, a Valgrind tool for detecting errors
     12    in threaded programs.
     13 
     14    Copyright (C) 2007-2013 OpenWorks LLP
     15       info (at) open-works.co.uk
     16 
     17    Redistribution and use in source and binary forms, with or without
     18    modification, are permitted provided that the following conditions
     19    are met:
     20 
     21    1. Redistributions of source code must retain the above copyright
     22       notice, this list of conditions and the following disclaimer.
     23 
     24    2. The origin of this software must not be misrepresented; you must
     25       not claim that you wrote the original software.  If you use this
     26       software in a product, an acknowledgment in the product
     27       documentation would be appreciated but is not required.
     28 
     29    3. Altered source versions must be plainly marked as such, and must
     30       not be misrepresented as being the original software.
     31 
     32    4. The name of the author may not be used to endorse or promote
     33       products derived from this software without specific prior written
     34       permission.
     35 
     36    THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     37    OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     38    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     39    ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
     40    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     41    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
     42    GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     43    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     44    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     45    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     46    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     47 
     48    ----------------------------------------------------------------
     49 
     50    Notice that the above BSD-style license applies to this one file
     51    (helgrind.h) only.  The entire rest of Valgrind is licensed under
     52    the terms of the GNU General Public License, version 2.  See the
     53    COPYING file in the source distribution for details.
     54 
     55    ----------------------------------------------------------------
     56 */
     57 
     58 #ifndef __HELGRIND_H
     59 #define __HELGRIND_H
     60 
     61 #include "valgrind.h"
     62 
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      /* Public request: forget all state for an address range.  End
         users invoke it via VALGRIND_HG_CLEAN_MEMORY (below). */
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is.
         Starts at TOOL_BASE + 256 to leave space for more public
         requests before the internal ones. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE, /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,    /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,      /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,   /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST  /* pth_cond_t*, pth_cond_attr_t*/

   } Vg_TCheckClientRequest;
    122 
    123 
    124 /*----------------------------------------------------------------*/
    125 /*---                                                          ---*/
    126 /*--- Implementation-only facilities.  Not for end-user use.   ---*/
    127 /*--- For end-user facilities see below (the next section in   ---*/
    128 /*--- this file.)                                              ---*/
    129 /*---                                                          ---*/
    130 /*----------------------------------------------------------------*/
    131 
    132 /* Do a client request.  These are macros rather than a functions so
    133    as to avoid having an extra frame in stack traces.
    134 
    135    NB: these duplicate definitions in hg_intercepts.c.  But here, we
    136    have to make do with weaker typing (no definition of Word etc) and
     137    no assertions, whereas in hg_intercepts.c we can use those facilities.
    138    Obviously it's important the two sets of definitions are kept in
    139    sync.
    140 
    141    The commented-out asserts should actually hold, but unfortunately
    142    they can't be allowed to be visible here, because that would
    143    require the end-user code to #include <assert.h>.
    144 */
    145 
    146 #define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
    147    do {                                                  \
    148       long int _arg1;                                    \
    149       /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
    150       _arg1 = (long int)(_arg1F);                        \
    151       VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
    152                                  (_creqF),               \
    153                                  _arg1, 0,0,0,0);        \
    154    } while (0)
    155 
    156 #define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
    157    do {                                                  \
    158       long int arg1;                                     \
    159       /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
    160       _arg1 = (long int)(_arg1F);                        \
    161       _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
    162                                  (_dfltF),               \
    163                                  (_creqF),               \
    164                                  _arg1, 0,0,0,0);        \
    165       _resF = _qzz_res;                                  \
    166    } while (0)
    167 
    168 #define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
    169    do {                                                  \
    170       long int _arg1, _arg2;                             \
    171       /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
    172       /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
    173       _arg1 = (long int)(_arg1F);                        \
    174       _arg2 = (long int)(_arg2F);                        \
    175       VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
    176                                  (_creqF),               \
    177                                  _arg1,_arg2,0,0,0);     \
    178    } while (0)
    179 
    180 #define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
    181                       _ty2F,_arg2F, _ty3F, _arg3F)       \
    182    do {                                                  \
    183       long int _arg1, _arg2, _arg3;                      \
    184       /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
    185       /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
    186       /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
    187       _arg1 = (long int)(_arg1F);                        \
    188       _arg2 = (long int)(_arg2F);                        \
    189       _arg3 = (long int)(_arg3F);                        \
    190       VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
    191                                  (_creqF),               \
    192                                  _arg1,_arg2,_arg3,0,0); \
    193    } while (0)
    194 
    195 
/* Report the attempted use of an unimplemented annotation to
   Helgrind, passing the annotation's name.  Used by the
   ANNOTATE_* stubs below. */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))
    199 
    200 
    201 /*----------------------------------------------------------------*/
    202 /*---                                                          ---*/
    203 /*--- Helgrind-native requests.  These allow access to         ---*/
    204 /*--- the same set of annotation primitives that are used      ---*/
    205 /*--- to build the POSIX pthread wrappers.                     ---*/
    206 /*---                                                          ---*/
    207 /*----------------------------------------------------------------*/
    208 
/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.

   In all of these, _mutex is passed through to Helgrind as an
   opaque machine word (cast to void* by DO_CREQ_*); each macro
   argument is evaluated exactly once. */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,      \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,      \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
    245 
/* ----------------------------------------------------------
   For describing semaphores.  _sem is passed to Helgrind as an
   opaque machine word.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation.  _value is the
   semaphore's initial count (as given to sem_init or equivalent). */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation) */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_WAIT_POST,          \
               void*,(_sem))

/* Notify here immediately before semaphore post (a release-style
   operation) */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_POST_PRE,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))
    271 
/* ----------------------------------------------------------
   For describing barriers.  _bar is passed to Helgrind as an
   opaque machine word.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))
    307 
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, return values >= 0 indicate success
   (the block was found), and the value -1 indicates block not
   found, and -2 is returned when not running on Helgrind.

   Note: this expands to a GCC statement expression (guarded by
   __extension__), so it evaluates to a long int value and requires a
   GCC-compatible compiler. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                            void*,(_qzz_blockstart));        \
     _npainted;                                              \
   }))
    340 
/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell H that an address range is not to be "tracked" until further
   notice.  This puts it in the NOACCESS state, in which case we
   ignore all reads and writes to it.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose.
   Arguments are evaluated exactly once. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))
    366 
    367 
    368 /*----------------------------------------------------------------*/
    369 /*---                                                          ---*/
    370 /*--- ThreadSanitizer-compatible requests                      ---*/
    371 /*--- (mostly unimplemented)                                   ---*/
    372 /*---                                                          ---*/
    373 /*----------------------------------------------------------------*/
    374 
    375 /* A quite-broad set of annotations, as used in the ThreadSanitizer
    376    project.  This implementation aims to be a (source-level)
    377    compatible implementation of the macros defined in:
    378 
    379    http://code.google.com/p/data-race-test/source
    380           /browse/trunk/dynamic_annotations/dynamic_annotations.h
    381 
    382    (some of the comments below are taken from the above file)
    383 
    384    The implementation here is very incomplete, and intended as a
    385    starting point.  Many of the macros are unimplemented.  Rather than
    386    allowing unimplemented macros to silently do nothing, they cause an
    387    assertion.  Intention is to implement them on demand.
    388 
    389    The major use of these macros is to make visible to race detectors,
    390    the behaviour (effects) of user-implemented synchronisation
    391    primitives, that the detectors could not otherwise deduce from the
    392    normal observation of pthread etc calls.
    393 
    394    Some of the macros are no-ops in Helgrind.  That's because Helgrind
    395    is a pure happens-before detector, whereas ThreadSanitizer uses a
    396    hybrid lockset and happens-before scheme, which requires more
    397    accurate annotations for correct operation.
    398 
    399    The macros are listed in the same order as in dynamic_annotations.h
    400    (URL just above).
    401 
    402    I should point out that I am less than clear about the intended
    403    semantics of quite a number of them.  Comments and clarifications
    404    welcomed!
    405 */
    406 
    407 /* ----------------------------------------------------------------
    408    These four allow description of user-level condition variables,
    409    apparently in the style of POSIX's pthread_cond_t.  Currently
    410    unimplemented and will assert.
    411    ----------------------------------------------------------------
    412 */
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind.
   Unimplemented: expands to _HG_CLIENTREQ_UNIMP, so using it causes
   Helgrind to report the unimplemented annotation. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock.  Unimplemented, as above. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV.  Unimplemented, as above. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV.  Unimplemented, as above. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
    434 
    435 
    436 /* ----------------------------------------------------------------
    437    Create completely arbitrary happens-before edges between threads.
    438 
    439    If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
    440    (w.r.t. some notional global clock for the computation) thread Tm
    441    does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
    442    memory accesses done by T1 .. Tn before the ..BEFORE.. call as
    443    happening-before all memory accesses done by Tm after the
    444    ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
    445    accesses afterwards are to the same locations as accesses before by
    446    any of T1 .. Tn.
    447 
    448    OBJ is a machine word (unsigned long, or void*), is completely
    449    arbitrary, and denotes the identity of some synchronisation object
    450    you're modelling.
    451 
    452    You must do the _BEFORE call just before the real sync event on the
    453    signaller's side, and _AFTER just after the real sync event on the
    454    waiter's side.
    455 
    456    If none of the rest of these macros make sense to you, at least
    457    take the time to understand these two.  They form the very essence
    458    of describing arbitrary inter-thread synchronisation events to
    459    Helgrind.  You can get a long way just with them alone.
    460 
    461    See also, extensive discussion on semantics of this in
    462    https://bugs.kde.org/show_bug.cgi?id=243935
    463 
    464    ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
    465    as bug 243935 is fully resolved.  It instructs Helgrind to forget
    466    about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in
    467    effect putting it back in its original state.  Once in that state,
    468    a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling
    469    thread.
    470 
    471    An implementation may optionally release resources it has
    472    associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
    473    happens.  Users are recommended to use
    474    ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
    475    synchronisation object is no longer needed, so as to avoid
    476    potential indefinite resource leaks.
    477    ----------------------------------------------------------------
    478 */
/* Signaller side: announce a release-style event on the arbitrary
   SO-tag 'obj'.  Place just before the real sync event (see the
   discussion above). */
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

/* Waiter side: acquire-style counterpart.  Place just after the real
   sync event. */
#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

/* Discard all happens-before state previously attached to 'obj',
   returning it to its original (unused) state. */
#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))
    487 
    488 /* ----------------------------------------------------------------
    489    Memory publishing.  The TSan sources say:
    490 
    491      Report that the bytes in the range [pointer, pointer+size) are about
    492      to be published safely. The race checker will create a happens-before
    493      arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
    494      subsequent accesses to this memory.
    495 
    496    I'm not sure I understand what this means exactly, nor whether it
    497    is relevant for a pure h-b detector.  Leaving unimplemented for
    498    now.
    499    ----------------------------------------------------------------
    500 */
/* Unimplemented: expands to _HG_CLIENTREQ_UNIMP, so using it causes
   Helgrind to report the unimplemented annotation.  See the
   rationale in the comment above. */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */
    509 
    510 
    511 /* ----------------------------------------------------------------
    512    TSan sources say:
    513 
    514      Instruct the tool to create a happens-before arc between
    515      MU->Unlock() and MU->Lock().  This annotation may slow down the
    516      race detector; normally it is used only when it would be
    517      difficult to annotate each of the mutex's critical sections
    518      individually using the annotations above.
    519 
    520    If MU is a posix pthread_mutex_t then Helgrind will do this anyway.
    521    In any case, leave as unimp for now.  I'm unsure about the intended
    522    behaviour.
    523    ----------------------------------------------------------------
    524 */
/* Unimplemented: expands to _HG_CLIENTREQ_UNIMP, so using it causes
   Helgrind to report the unimplemented annotation (see the note
   above on intended TSan semantics). */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */
    530 
    531 
    532 /* ----------------------------------------------------------------
    533    TSan sources say:
    534 
    535      Annotations useful when defining memory allocators, or when
    536      memory that was protected in one way starts to be protected in
    537      another.
    538 
    539      Report that a new memory at "address" of size "size" has been
    540      allocated.  This might be used when the memory has been retrieved
    541      from a free list and is about to be reused, or when a the locking
    542      discipline for a variable changes.
    543 
    544    AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
    545    ----------------------------------------------------------------
    546 */
/* Implemented as a straight alias for VALGRIND_HG_CLEAN_MEMORY:
   forget all prior state for [address, address+size) and treat it as
   exclusively owned by the calling thread. */
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
    549 
    550 
    551 /* ----------------------------------------------------------------
    552    TSan sources say:
    553 
    554      Annotations useful when defining FIFO queues that transfer data
    555      between threads.
    556 
    557    All unimplemented.  Am not claiming to understand this (yet).
    558    ----------------------------------------------------------------
    559 */
    560 
/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get).
   All four PCQ annotations are unimplemented: each expands to
   _HG_CLIENTREQ_UNIMP, so using one causes Helgrind to report the
   unimplemented annotation. */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
    581 
    582 
    583 /* ----------------------------------------------------------------
    584    Annotations that suppress errors.  It is usually better to express
    585    the program's synchronization using the other annotations, but
    586    these can be used when all else fails.
    587 
    588    Currently these are all unimplemented.  I can't think of a simple
    589    way to implement them without at least some performance overhead.
    590    ----------------------------------------------------------------
    591 */
    592 
    593 /* Report that we may have a benign race at "pointer", with size
    594    "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
    595    point where "pointer" has been allocated, preferably close to the point
    596    where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.
    597 
    598    XXX: what's this actually supposed to do?  And what's the type of
    599    DESCRIPTION?  When does the annotation stop having an effect?
    600 */
        /* Unimplemented: both 'pointer' and 'description' are ignored; the
           expansion only passes the annotation's name to _HG_CLIENTREQ_UNIMP.
           Contrast with ANNOTATE_BENIGN_RACE_SIZED below, which does act. */
    601 #define ANNOTATE_BENIGN_RACE(pointer, description) \
    602    _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")
    603 
    604 /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
    605    the memory range [address, address+size). */
    606 #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
    607    VALGRIND_HG_DISABLE_CHECKING(address, size)
    608 
    609 /* Request the analysis tool to ignore all reads in the current thread
    610    until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
    611    intentional racey reads, while still checking other reads and all
    612    writes. */
        /* The four IGNORE_{READS,WRITES}_{BEGIN,END} macros below are
           unimplemented stubs: each expands to _HG_CLIENTREQ_UNIMP with the
           annotation's name and takes no effect on checking. */
    613 #define ANNOTATE_IGNORE_READS_BEGIN() \
    614    _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")
    615 
    616 /* Stop ignoring reads. */
    617 #define ANNOTATE_IGNORE_READS_END() \
    618    _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")
    619 
    620 /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
    621 #define ANNOTATE_IGNORE_WRITES_BEGIN() \
    622    _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")
    623 
    624 /* Stop ignoring writes. */
    625 #define ANNOTATE_IGNORE_WRITES_END() \
    626    _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")
    627 
    628 /* Start ignoring all memory accesses (reads and writes). */
        /* Composite: begins ignoring reads, then writes.  The do/while(0)
           wrapper makes the macro usable as a single statement (e.g. as an
           unbraced if-body).  Both sub-annotations are currently
           unimplemented stubs — see above. */
    629 #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
    630    do { \
    631       ANNOTATE_IGNORE_READS_BEGIN(); \
    632       ANNOTATE_IGNORE_WRITES_BEGIN(); \
    633    } while (0)
    634 
    635 /* Stop ignoring all memory accesses. */
        /* Composite: stops ignoring in the reverse order of the BEGIN macro
           above — writes first, then reads.  do/while(0) for single-statement
           safety, as with the BEGIN form. */
    636 #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
    637    do { \
    638       ANNOTATE_IGNORE_WRITES_END(); \
    639       ANNOTATE_IGNORE_READS_END(); \
    640    } while (0)
    641 
    642 
    643 /* ----------------------------------------------------------------
    644    Annotations useful for debugging.
    645 
    646    Again, so far unimplemented, partly for performance reasons.
    647    ----------------------------------------------------------------
    648 */
    649 
    650 /* Request to trace every access to ADDRESS. */
        /* Both debugging annotations below are unimplemented stubs; the
           'address' / 'name' arguments are ignored. */
    651 #define ANNOTATE_TRACE_MEMORY(address) \
    652    _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")
    653 
    654 /* Report the current thread name to a race detector. */
    655 #define ANNOTATE_THREAD_NAME(name) \
    656    _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
    657 
    658 
    659 /* ----------------------------------------------------------------
    660    Annotations for describing behaviour of user-implemented lock
    661    primitives.  In all cases, the LOCK argument is a completely
    662    arbitrary machine word (unsigned long, or void*) and can be any
    663    value which gives a unique identity to the lock objects being
    664    modelled.
    665 
    666    We just pretend they're ordinary posix rwlocks.  That'll probably
    667    give some rather confusing wording in error messages, claiming that
    668    the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
    669    they are not.  Ah well.
    670    ----------------------------------------------------------------
    671 */
        /* These four ARE implemented: per the section comment above, they map
           directly onto the client requests Helgrind issues for pthread
           rwlocks (INIT_POST / DESTROY_PRE / LOCK_POST / UNLOCK_PRE), so the
           arbitrary LOCK word is modelled as if it were a pthread_rwlock_t*. */
    672 /* Report that a lock has just been created at address LOCK. */
    673 #define ANNOTATE_RWLOCK_CREATE(lock)                         \
    674    DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
    675                void*,(lock))
    676 
    677 /* Report that the lock at address LOCK is about to be destroyed. */
    678 #define ANNOTATE_RWLOCK_DESTROY(lock)                        \
    679    DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
    680                void*,(lock))
    681 
    682 /* Report that the lock at address LOCK has just been acquired.
    683    is_w=1 for writer lock, is_w=0 for reader lock. */
    684 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
    685   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,     \
    686                void*,(lock), unsigned long,(is_w))
    687 
    688 /* Report that the lock at address LOCK is about to be released. */
    689 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
    690   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,     \
    691               void*,(lock)) /* is_w is ignored */
    692 
    693 
    694 /* -------------------------------------------------------------
    695    Annotations useful when implementing barriers.  They are not
    696    normally needed by modules that merely use barriers.
    697    The "barrier" argument is a pointer to the barrier object.
    698    ----------------------------------------------------------------
    699 */
    700 
    701 /* Report that the "barrier" has been initialized with initial
    702    "count".  If 'reinitialization_allowed' is true, initialization is
    703    allowed to happen multiple times w/o calling barrier_destroy() */
        /* Unimplemented stub: all three arguments are ignored; only the
           annotation's name is passed to _HG_CLIENTREQ_UNIMP. */
    704 #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
    705    _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")
    706 
    707 /* Report that we are about to enter barrier_wait("barrier"). */
    708 #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
    709    _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
    710 
    711 /* Report that we just exited barrier_wait("barrier"). */
    712 #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
    713    _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
    714 
    715 /* Report that the "barrier" has been destroyed. */
        /* Unimplemented stub: 'barrier' is ignored. */
    716 #define ANNOTATE_BARRIER_DESTROY(barrier) \
    717    _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
    718 
    719 
    720 /* ----------------------------------------------------------------
    721    Annotations useful for testing race detectors.
    722    ----------------------------------------------------------------
    723 */
    724 
        /* All three race-detector-testing annotations below are unimplemented
           stubs in Helgrind; their arguments are ignored and each expands to
           _HG_CLIENTREQ_UNIMP with the annotation's name. */
    725 /* Report that we expect a race on the variable at ADDRESS.  Use only
    726    in unit tests for a race detector. */
    727 #define ANNOTATE_EXPECT_RACE(address, description) \
    728    _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")
    729 
    730 /* A no-op. Insert where you like to test the interceptors. */
    731 #define ANNOTATE_NO_OP(arg) \
    732    _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
    733 
    734 /* Force the race detector to flush its state. The actual effect depends on
    735  * the implementation of the detector. */
    736 #define ANNOTATE_FLUSH_STATE() \
    737    _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")
    738 
    739 #endif /* __HELGRIND_H */
    740