//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
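
// Because these hooks are weak, a front-end can observe every heap
// allocation and deallocation by providing strong definitions. A minimal
// sketch (hypothetical user code, not part of this file):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // record (ptr, size), e.g. for a custom allocation profiler
//   }
//   extern "C" void __sanitizer_free_hook(void *ptr) {
//     // record that ptr is being released
//   }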

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
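    // Meta shadow is released at page granularity, and kPageSize bytes of
    // user memory correspond to one page of meta shadow. Trim [p, p + size)
    // to kPageSize-aligned boundaries so the flushed meta range below is
    // page-aligned; the CHECK above guarantees at least one full page
    // remains after trimming.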
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
  }
};

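// The allocator is constructed in static storage and initialized explicitly
// from InitializeAllocator() instead of via a C++ global constructor, so it
// is usable regardless of static-initialization order.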
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
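
// SignalUnsafeCall reports the classic async-signal-safety bug: allocating
// or freeing inside a signal handler. A sketch of the kind of user code it
// flags (hypothetical):
//
//   void handler(int) {
//     free(buf);  // malloc/free are not async-signal-safe
//   }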

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

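// The element count and size must be checked for multiplicative overflow
// before allocating; e.g. on a 64-bit target calloc(1ull << 33, 1ull << 33)
// would otherwise wrap to a 4-byte request and return an undersized block.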
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

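// realloc is implemented as allocate + copy + free rather than resizing in
// place; see the FIXME below about shrinking requests.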
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  return b ? b->siz : 0;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
}

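// Allocation of the runtime's internal data structures. thr->nomalloc is
// cleared before CHECK fires because CHECK itself allocates; without the
// reset the failure path would recurse instead of reporting.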
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

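// The next two statistics are not implemented for TSan; they return
// dummy values.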
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
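
// Typical use of the two queries above (hypothetical user code):
//
//   void *p = malloc(16);
//   assert(__sanitizer_get_ownership(p));
//   assert(__sanitizer_get_allocated_size(p) >= 16);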
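// Releases this thread's allocator caches (and metamap caches) back to the
// shared state, so a long-idle thread does not pin freed memory.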
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
  ctx->metamap.OnThreadIdle(thr);
}
}  // extern "C"