/*
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (helgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.  See
   the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks LLP
      info (at) open-works.co.uk

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (helgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/

#ifndef __HELGRIND_H
#define __HELGRIND_H

#include "valgrind.h"

/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,  /* void*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED,    /* void*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED,    /* void* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_RELEASED,         /* void* */
      _VG_USERREQ__HG_POSIX_SEM_ACQUIRED,         /* void* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,      /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,  /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,    /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t* */
      _VG_USERREQ__HG_GNAT_MASTER_HOOK,       /* void*d, void*m, Word ml */
      _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s, Word ml */
      _VG_USERREQ__HG_GET_ABITS,              /* Addr a, Addr abits, ulong len */
      _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN,
      _VG_USERREQ__HG_PTHREAD_CREATE_END,
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t*, long tookLock */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW, long */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_POST,        /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t*, long tookLock */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */
      _VG_USERREQ__HG_RTLD_BIND_GUARD,            /* int flags */
      _VG_USERREQ__HG_RTLD_BIND_CLEAR,            /* int flags */
      _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN  /* void*d, void*m */
   } Vg_TCheckClientRequest;


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Implementation-only facilities.  Not for end-user use.   ---*/
/*--- For end-user facilities see below (the next section in   ---*/
/*--- this file.)                                              ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Do a client request.  These are macros rather than functions so
   as to avoid having an extra frame in stack traces.

   NB: these duplicate definitions in hg_intercepts.c.  But here, we
   have to make do with weaker typing (no definition of Word etc) and
   no assertions, whereas in hg_intercepts.c we can use those
   facilities.  Obviously it's important that the two sets of
   definitions are kept in sync.

   The commented-out asserts should actually hold, but unfortunately
   they can't be allowed to be visible here, because that would
   require the end-user code to #include <assert.h>.
*/

#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
   } while (0)

#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                                 (_dfltF),               \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
      _resF = _qzz_res;                                  \
   } while (0)

#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _arg1, _arg2;                             \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1,_arg2,0,0,0);     \
   } while (0)

#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
   } while (0)

#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                                 (_dfltF),               \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
      _resF = _qzz_res;                                  \
   } while (0)


#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Helgrind-native requests.  These allow access to         ---*/
/*--- the same set of annotation primitives that are used      ---*/
/*--- to build the POSIX pthread wrappers.                     ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,   \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST,   \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))

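/* Example: a hypothetical sketch of how a user-level mutex
   implementation might drive the above hooks.  The "my_mutex_t" type
   and the "my_lock_impl"/"my_unlock_impl" functions are invented
   names for illustration only; they are not part of this header.

      typedef struct { volatile int held; } my_mutex_t;

      void my_mutex_init(my_mutex_t* mx) {
         mx->held = 0;
         VALGRIND_HG_MUTEX_INIT_POST(mx, 0);   // 0 = non-recursive
      }

      void my_mutex_lock(my_mutex_t* mx) {
         VALGRIND_HG_MUTEX_LOCK_PRE(mx, 0);    // 0 = not a trylock
         my_lock_impl(mx);                     // really acquire it
         VALGRIND_HG_MUTEX_LOCK_POST(mx);      // only after success
      }

      void my_mutex_unlock(my_mutex_t* mx) {
         VALGRIND_HG_MUTEX_UNLOCK_PRE(mx);
         my_unlock_impl(mx);                   // really release it
         VALGRIND_HG_MUTEX_UNLOCK_POST(mx);
      }
*/
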
/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation). */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED,           \
               void*,(_sem))

/* Notify here immediately before a semaphore post (a release-style
   operation). */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))

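/* Example: a hypothetical sketch of annotating a homegrown counting
   semaphore.  The "my_sem_t" type and "my_sem_*_impl" functions are
   invented names, not part of this header.

      void my_sem_init(my_sem_t* s, unsigned long value) {
         my_sem_init_impl(s, value);
         VALGRIND_HG_SEM_INIT_POST(s, value);
      }

      void my_sem_wait(my_sem_t* s) {       // acquire-style
         my_sem_wait_impl(s);               // blocks until decremented
         VALGRIND_HG_SEM_WAIT_POST(s);      // notify after success
      }

      void my_sem_post(my_sem_t* s) {       // release-style
         VALGRIND_HG_SEM_POST_PRE(s);       // notify before posting
         my_sem_post_impl(s);
      }
*/
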
/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))

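/* Example: a hypothetical sketch for a user-level barrier.  The
   "my_bar_t" type and "my_barrier_*_impl" functions are invented
   names for illustration.

      void my_barrier_init(my_bar_t* b, unsigned long count) {
         VALGRIND_HG_BARRIER_INIT_PRE(b, count, 0);  // 0 = not resizable
         my_barrier_init_impl(b, count);
      }

      void my_barrier_wait(my_bar_t* b) {
         VALGRIND_HG_BARRIER_WAIT_PRE(b);            // before arrival
         my_barrier_wait_impl(b);
      }

      void my_barrier_destroy(my_bar_t* b) {
         VALGRIND_HG_BARRIER_DESTROY_PRE(b);
         my_barrier_destroy_impl(b);
      }
*/
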
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

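/* Example: a hypothetical sketch of a recycling allocator.  A block
   popped off a free list still carries the access history of its
   previous owner, so paint it clean before handing it out again.
   The "my_free_list_pop" and "my_fresh_block" functions are invented
   names for illustration.

      void* my_alloc(unsigned long n) {
         void* p = my_free_list_pop(n);   // recycled block, or NULL
         if (p == NULL)
            return my_fresh_block(n);     // fresh memory is already clean
         VALGRIND_HG_CLEAN_MEMORY(p, n);  // forget previous owners
         return p;
      }
*/
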
/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, return values >= 0 indicate success
   (the block was found); -1 indicates that the block was not found,
   and -2 is returned when not running on Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                            void*,(_qzz_blockstart));        \
     _npainted;                                              \
   }))

/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell Helgrind that an address range is not to be "tracked" until
   further notice.  This puts it in the NOACCESS state, in which all
   reads and writes to it are ignored.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

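/* Example: a hypothetical sketch of bracketing a deliberately racy
   statistics counter, where lost updates are accepted by design.
   "stats_hits" is an invented variable for illustration.

      static unsigned long stats_hits;

      void stats_init(void) {
         VALGRIND_HG_DISABLE_CHECKING(&stats_hits, sizeof stats_hits);
      }

      void stats_fini(void) {
         VALGRIND_HG_ENABLE_CHECKING(&stats_hits, sizeof stats_hits);
      }
*/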

/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
   If the zzabits array is provided, the accessibility bits are copied
   into it.
   Return values:
     -2   if not running on Helgrind
     -1   if any part of zzabits is not addressable
     >= 0 on success: the number of addressable bytes found.
   So, to check that a whole range is addressable, check that
      VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
   In addition, if you want to examine the addressability of each
   byte of the range, you need to provide a non-NULL pointer as
   second argument, pointing to an array of unsigned char
   of length len.
   Addressable bytes are indicated with 0xff.
   Non-addressable bytes are indicated with 0x00.
*/
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes)          \
   (__extension__                                            \
   ({long int _res;                                          \
      DO_CREQ_W_WWW(_res, (-2)/*default*/,                   \
                    _VG_USERREQ__HG_GET_ABITS,               \
                    void*,(zza), void*,(zzabits),            \
                    unsigned long,(zznbytes));               \
      _res;                                                  \
   }))

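/* Example: a hypothetical usage sketch.  First check that a whole
   range is addressable, then fetch the per-byte map ("buf" and
   "abits" are invented locals).

      unsigned char buf[64], abits[64];
      long ok = VALGRIND_HG_GET_ABITS(buf, NULL, 64);
      if (ok == 64) {
         // whole range addressable; now get the per-byte detail
         VALGRIND_HG_GET_ABITS(buf, abits, 64);
         // abits[i] is 0xff where buf[i] is addressable, else 0x00
      }
*/
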
/* End-user request for Ada applications compiled with GNAT.
   Helgrind understands the Ada concept of Ada task dependencies and
   terminations.  See Ada Reference Manual section 9.3 "Task Dependence
   - Termination of Tasks".
   However, in some cases, the master of (terminated) tasks completes
   only when the application exits.  An example of this is dynamically
   allocated tasks with an access type defined at Library Level.
   By default, the state of such tasks in Helgrind will be 'exited but
   join not done yet'.  Many tasks in this state can however cause
   Helgrind's CPU and memory use to increase significantly.
   VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN can be used to indicate to
   Helgrind that a not-yet-completed master has nevertheless already
   'seen' the termination of a dependent: this is conceptually the
   same as a pthread_join, and causes the cleanup of the dependent as
   done by Helgrind when a master completes.
   This avoids the overhead in Helgrind caused by such tasks.
   A typical usage, for a master to indicate that it has conceptually
   joined with a dependent task before the master completes, is:
      while not Dep_Task'Terminated loop
         ... do whatever to wait for Dep_Task termination.
      end loop;
      VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN
        (Dep_Task'Identity,
         Ada.Task_Identification.Current_Task);
   Note that VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN should be a binding
   to a C function built with the below macro. */
#define VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN(_qzz_dep, _qzz_master) \
   DO_CREQ_v_WW(_VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN,           \
                void*,(_qzz_dep),                                     \
                void*,(_qzz_master))

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- ThreadSanitizer-compatible requests                      ---*/
/*--- (mostly unimplemented)                                   ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* A quite-broad set of annotations, as used in the ThreadSanitizer
   project.  This implementation aims to be (source-level) compatible
   with the macros defined in:

   http://code.google.com/p/data-race-test/source
          /browse/trunk/dynamic_annotations/dynamic_annotations.h

   (some of the comments below are taken from the above file)

   The implementation here is very incomplete, and intended as a
   starting point.  Many of the macros are unimplemented.  Rather than
   allowing unimplemented macros to silently do nothing, they cause an
   assertion.  The intention is to implement them on demand.

   The major use of these macros is to make visible to race detectors
   the behaviour (effects) of user-implemented synchronisation
   primitives, which the detectors could not otherwise deduce from
   normal observation of pthread etc calls.

   Some of the macros are no-ops in Helgrind.  That's because Helgrind
   is a pure happens-before detector, whereas ThreadSanitizer uses a
   hybrid lockset and happens-before scheme, which requires more
   accurate annotations for correct operation.

   The macros are listed in the same order as in dynamic_annotations.h
   (URL just above).

   I should point out that I am less than clear about the intended
   semantics of quite a number of them.  Comments and clarifications
   welcomed!
*/

/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")


/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.

   If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
   (w.r.t. some notional global clock for the computation) thread Tm
   does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
   memory accesses done by T1 .. Tn before the ..BEFORE.. call as
   happening-before all memory accesses done by Tm after the
   ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
   accesses afterwards are to the same locations as accesses before by
   any of T1 .. Tn.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on the
   signaller's side, and _AFTER just after the real sync event on the
   waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.

   See also the extensive discussion on the semantics of this in
   https://bugs.kde.org/show_bug.cgi?id=243935

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
   as bug 243935 is fully resolved.  It instructs Helgrind to forget
   about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in
   effect putting it back in its original state.  Once in that state,
   a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling
   thread.

   An implementation may optionally release resources it has
   associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   happens.  Users are recommended to use
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
   synchronisation object is no longer needed, so as to avoid
   potential indefinite resource leaks.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))

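/* Example: a hypothetical sketch of annotating a hand-rolled
   one-shot message hand-off built on C11 atomics (<stdatomic.h>).
   The "msg_t" type and the "compute"/"use" functions are invented
   names.  Per the rules above, _BEFORE goes just before the real
   release on the signaller's side, _AFTER just after the real
   acquire on the waiter's side.

      typedef struct { int payload; atomic_int ready; } msg_t;

      void producer(msg_t* q) {
         q->payload = compute();        // plain write
         ANNOTATE_HAPPENS_BEFORE(q);    // just before the release
         atomic_store_explicit(&q->ready, 1, memory_order_release);
      }

      void consumer(msg_t* q) {
         while (!atomic_load_explicit(&q->ready, memory_order_acquire))
            ;                           // spin until published
         ANNOTATE_HAPPENS_AFTER(q);     // just after the acquire
         use(q->payload);               // no race reported here
      }

      // When q is retired, let Helgrind release its resources:
      //    ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(q);
*/
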
/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are about
     to be published safely. The race checker will create a happens-before
     arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
     subsequent accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED.  Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED.  Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */


/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a POSIX pthread_mutex_t then Helgrind will do this anyway.
   In any case, leave as unimplemented for now.  I'm unsure about the
   intended behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated.  Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been retrieved
     from a free list and is about to be reused, or when the locking
     discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining FIFO queues that transfer data
     between threads.

   All unimplemented.  Am not claiming to understand this (yet).
   ----------------------------------------------------------------
*/

/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")


/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronisation using the other annotations, but
   these can be used when all else fails.

   Currently most of these are unimplemented.  I can't think of a
   simple way to implement them without at least some performance
   overhead.
   ----------------------------------------------------------------
*/

/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))".  "pointer" must be a non-void* pointer.
   Insert at the point where "pointer" has been allocated, preferably
   close to the point where the race happens.  See also
   ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(pointer, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   VALGRIND_HG_DISABLE_CHECKING(address, size)

/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful for ignoring
   intentionally racy reads, while still checking other reads and all
   writes. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)


/* ----------------------------------------------------------------
   Annotations useful for debugging.

   Again, so far unimplemented, partly for performance reasons.
   ----------------------------------------------------------------
*/

/* Request to trace every access to ADDRESS. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")


/* ----------------------------------------------------------------
   Annotations for describing behaviour of user-implemented lock
   primitives.  In all cases, the LOCK argument is a completely
   arbitrary machine word (unsigned long, or void*) and can be any
   value which gives a unique identity to the lock objects being
   modelled.

   We just pretend they're ordinary POSIX rwlocks.  That'll probably
   give some rather confusing wording in error messages, claiming that
   the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
   they are not.  Ah well.
   ----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK. */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
  DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED,      \
               void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released.
   is_w is ignored. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
  DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED,       \
              void*,(lock))

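/* Example: a hypothetical sketch for a homegrown reader-writer lock.
   The "my_rwl_*_impl" functions are invented names for illustration.

      void my_rwl_init(void* lk)    { my_rwl_init_impl(lk);
                                      ANNOTATE_RWLOCK_CREATE(lk); }
      void my_rwl_wrlock(void* lk)  { my_rwl_wrlock_impl(lk);
                                      ANNOTATE_RWLOCK_ACQUIRED(lk, 1); }
      void my_rwl_rdlock(void* lk)  { my_rwl_rdlock_impl(lk);
                                      ANNOTATE_RWLOCK_ACQUIRED(lk, 0); }
      void my_rwl_unlock(void* lk)  { ANNOTATE_RWLOCK_RELEASED(lk, 1);
                                      my_rwl_unlock_impl(lk); }
      void my_rwl_destroy(void* lk) { ANNOTATE_RWLOCK_DESTROY(lk);
                                      my_rwl_destroy_impl(lk); }
*/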

/* ----------------------------------------------------------------
   Annotations useful when implementing barriers.  They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object.
   ----------------------------------------------------------------
*/

/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy(). */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")

/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")

/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")

/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")


/* ----------------------------------------------------------------
   Annotations useful for testing race detectors.
   ----------------------------------------------------------------
*/

/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op.  Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")

/* Force the race detector to flush its state.  The actual effect
   depends on the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")

#endif /* __HELGRIND_H */