//=-- lsan_interceptors.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Interceptors for standalone LSan.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"

using namespace __lsan;

extern "C" {
int pthread_attr_init(void *attr);
int pthread_attr_destroy(void *attr);
int pthread_attr_getdetachstate(void *attr, int *v);
int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}

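// Captures the current stack trace into a local `stack` variable so the
// allocation site can be recorded. When fast (frame-pointer-based) unwinding
// is enabled and the current thread's context is known, the unwind is bounded
// by that thread's stack range.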
#define GET_STACK_TRACE                                              \
  StackTrace stack;                                                  \
  {                                                                  \
    uptr stack_top = 0, stack_bottom = 0;                            \
    ThreadContext *t;                                                \
    bool fast = common_flags()->fast_unwind_on_malloc;               \
    if (fast && (t = CurrentThreadContext())) {                      \
      stack_top = t->stack_end();                                    \
      stack_bottom = t->stack_begin();                               \
    }                                                                \
    stack.Unwind(__sanitizer::common_flags()->malloc_context_size,   \
                 StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), 0, \
                 stack_top, stack_bottom, fast);                     \
  }

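// Make sure the LSan runtime is initialized before an intercepted call is
// serviced. Initialization must not be in progress: re-entering an
// interceptor from __lsan_init itself is a fatal error (except for calloc,
// which handles that case explicitly below).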
#define ENSURE_LSAN_INITED do {   \
  CHECK(!lsan_init_is_running);   \
  if (!lsan_inited)               \
    __lsan_init();                \
} while (0)

///// Malloc/free interceptors. /////

const bool kAlwaysClearMemory = true;

namespace std {
  struct nothrow_t;
}

INTERCEPTOR(void*, malloc, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  return Allocate(stack, size, 1, kAlwaysClearMemory);
}

INTERCEPTOR(void, free, void *p) {
  ENSURE_LSAN_INITED;
  Deallocate(p);
}

INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
  if (lsan_init_is_running) {
    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
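    // Serve such early requests from a small static pool. The pool is
    // zero-initialized, so calloc's zeroing contract still holds, and
    // allocations made from it are never reclaimed.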
    const uptr kCallocPoolSize = 1024;
    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
    static uptr allocated;
    uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
    allocated += size_in_words;
    CHECK(allocated < kCallocPoolSize);
    return mem;
  }
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

INTERCEPTOR(void*, realloc, void *q, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  return Reallocate(stack, q, size, 1);
}

INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  return Allocate(stack, size, alignment, kAlwaysClearMemory);
}

INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  return Allocate(stack, size, alignment, kAlwaysClearMemory);
}

INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  // FIXME: Return ENOMEM if user requested more than max alloc size.
  return 0;
}

INTERCEPTOR(void*, valloc, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  if (size == 0)
    size = GetPageSizeCached();
  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}

INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
  ENSURE_LSAN_INITED;
  return GetMallocUsableSize(ptr);
}

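// The glibc extensions mallinfo() and mallopt() are stubbed out: mallinfo()
// reports all-zero statistics and mallopt() ignores its arguments.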
struct fake_mallinfo {
  int x[10];
};

INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
  struct fake_mallinfo res;
  internal_memset(&res, 0, sizeof(res));
  return res;
}

INTERCEPTOR(int, mallopt, int cmd, int value) {
  return -1;
}

INTERCEPTOR(void*, pvalloc, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE;
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}

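// cfree is an obsolete glibc alias for free; forward it to the free wrapper.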
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));

#define OPERATOR_NEW_BODY                              \
  ENSURE_LSAN_INITED;                                  \
  GET_STACK_TRACE;                                     \
  return Allocate(stack, size, 1, kAlwaysClearMemory);

INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }

#define OPERATOR_DELETE_BODY \
  ENSURE_LSAN_INITED;        \
  Deallocate(ptr);

INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const &) {
  OPERATOR_DELETE_BODY;
}

// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
    ALIAS(WRAPPER_NAME(memalign));

///// Thread initialization and finalization. /////

static unsigned g_thread_finalize_key;

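// TLS destructor registered for g_thread_finalize_key. To run as late as
// possible relative to other TLS destructors, it re-registers itself with a
// decremented iteration count until only one iteration remains, and only then
// finalizes the thread.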
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
      Report("LeakSanitizer: failed to set thread key.\n");
      Die();
    }
    return;
  }
  ThreadFinish();
}

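// Arguments for __lsan_thread_start_func. The struct lives on the parent
// thread's stack inside the pthread_create interceptor; the tid field doubles
// as a handshake flag between parent and child.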
struct ThreadParam {
  void *(*callback)(void *arg);
  void *param;
  atomic_uintptr_t tid;
};

extern "C" void *__lsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  // Wait until the last iteration to maximize the chance that we are the last
  // destructor to run.
  if (pthread_setspecific(g_thread_finalize_key,
                          (void*)kPthreadDestructorIterations)) {
    Report("LeakSanitizer: failed to set thread key.\n");
    Die();
  }
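  // Wait for the parent to register this thread and publish its tid, then
  // reset the field to signal that the parent may release the ThreadParam.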
  int tid = 0;
  while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
    internal_sched_yield();
  atomic_store(&p->tid, 0, memory_order_release);
  SetCurrentThread(tid);
  ThreadStart(tid, GetTid());
  return callback(param);
}

INTERCEPTOR(int, pthread_create, void *th, void *attr,
            void *(*callback)(void *), void *param) {
  ENSURE_LSAN_INITED;
  EnsureMainThreadIDIsCorrect();
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(&myattr);
    attr = &myattr;
  }
  AdjustStackSize(attr);
  int detached = 0;
  pthread_attr_getdetachstate(attr, &detached);
  ThreadParam p;
  p.callback = callback;
  p.param = param;
  atomic_store(&p.tid, 0, memory_order_relaxed);
  int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
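  // On success, register the new thread with LSan, publish its tid to the
  // child, and wait for the child to acknowledge before p goes out of scope.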
  if (res == 0) {
    int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
    CHECK_NE(tid, 0);
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)
      internal_sched_yield();
  }
  if (attr == &myattr)
    pthread_attr_destroy(&myattr);
  return res;
}

INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  ENSURE_LSAN_INITED;
  int tid = ThreadTid((uptr)th);
  int res = REAL(pthread_join)(th, ret);
  if (res == 0)
    ThreadJoin(tid);
  return res;
}

namespace __lsan {

void InitializeInterceptors() {
  INTERCEPT_FUNCTION(malloc);
  INTERCEPT_FUNCTION(free);
  INTERCEPT_FUNCTION(cfree);
  INTERCEPT_FUNCTION(calloc);
  INTERCEPT_FUNCTION(realloc);
  INTERCEPT_FUNCTION(memalign);
  INTERCEPT_FUNCTION(posix_memalign);
  INTERCEPT_FUNCTION(__libc_memalign);
  INTERCEPT_FUNCTION(valloc);
  INTERCEPT_FUNCTION(pvalloc);
  INTERCEPT_FUNCTION(malloc_usable_size);
  INTERCEPT_FUNCTION(mallinfo);
  INTERCEPT_FUNCTION(mallopt);
  INTERCEPT_FUNCTION(pthread_create);
  INTERCEPT_FUNCTION(pthread_join);

  if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
    Report("LeakSanitizer: failed to create thread key.\n");
    Die();
  }
}

}  // namespace __lsan