//===-- asan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
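
// Worked example (added for illustration, not part of the original source):
// the 3-bit rz_log encodes redzone sizes as 16 << rz_log, so rz_log values
// 0..7 map to 16, 32, 64, 128, 256, 512, 1024 and 2048 bytes.  Conversely,
// RZSize2Log(128) == Log2(128) - 4 == 3, and the CHECK_EQ above verifies the
// round trip RZLog2Size(RZSize2Log(x)) == x.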

static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
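
// Illustrative example (added comment, not in the original source): for a
// chunk with a 64-byte left redzone, alloc_beg points at the start of the
// redzone and the 16-byte ChunkHeader occupies its last 16 bytes, so user
// memory starts right after the header.  Because 64 > 16, Allocate() below
// also writes kAllocBegMagic and the ChunkHeader address into the first two
// uptr words at alloc_beg, which is how GetAsanChunk() later finds the header
// from the block start.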

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
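
// Note (added for clarity): chunk_state and the 24-bit alloc_tid share the
// first four bytes, so thread ids above (1 << 24) - 1 would not fit in the
// bitfield; Allocate() below re-checks this with
// CHECK_EQ(alloc_tid, m->alloc_tid) after the assignment.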

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
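
// State transitions, as implemented in this file (summary added for
// reference): CHUNK_AVAILABLE -> CHUNK_ALLOCATED in Allocate(), via an
// atomic_store of the first header byte; CHUNK_ALLOCATED -> CHUNK_QUARANTINE
// in AtomicallySetQuarantineFlag(), via a compare-and-swap that also catches
// double frees; CHUNK_QUARANTINE -> CHUNK_AVAILABLE in
// QuarantineCallback::Recycle(), right before the memory is returned to the
// underlying allocator.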

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of a deallocated chunk, confusing the GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return get_allocator().Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
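
// How these pieces fit together (summary added for clarity): Deallocate()
// below poisons a chunk and Put()s it into a per-thread QuarantineCache; once
// the quarantine grows past the limit configured in SharedInitCode(), old
// chunks are drained through QuarantineCallback::Recycle(), which finally
// returns the memory to the underlying allocator.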

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
}
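
// Example (illustrative; the exact option spelling is defined by the flag
// parser, not by this file): these fields mirror runtime flags such as
// quarantine_size_mb, redzone, max_redzone, allocator_may_return_null and
// alloc_dealloc_mismatch, so a run with
//   ASAN_OPTIONS=quarantine_size_mb=64:redzone=32
// would, via SetFrom(), configure a 64 MB quarantine and a 32-byte minimum
// redzone.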

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
  static const uptr kMaxThreadLocalQuarantine =
      FIRST_32_SECOND_64(1 << 18, 1 << 20);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    kMaxThreadLocalQuarantine);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void Initialize(const AllocatorOptions &options) {
    allocator.Init(options.may_return_null);
    SharedInitCode(options);
  }

  void ReInitialize(const AllocatorOptions &options) {
    allocator.SetMayReturnNull(options.may_return_null);
    SharedInitCode(options);
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = allocator.MayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
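
  // Worked example (illustrative): for user_requested_size == 100 the ladder
  // above picks rz_log == 2 (100 is above 128 - 32 but at most 512 - 64),
  // i.e. a 64-byte redzone; with the default min_redzone == 16 and
  // max_redzone == 2048 the clamp Min(Max(2, 0), 7) leaves it at 2.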

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over
    // an available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
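    // Worked example (illustrative; assumes the default 8-byte shadow
    // granularity): for size == 100 and default alignment, rz_log == 2 and
    // rz_size == 64, rounded_size == RoundUpTo(100, 8) == 104, so
    // needed_size == 168; alignment equals min_alignment and the request fits
    // in the primary allocator, so no extra padding or manual right redzone
    // is added.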
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
             (void*)size);
      return allocator.ReturnNullOrDie();
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    bool check_rss_limit = true;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    }

    if (!allocated)
      return allocator.ReturnNullOrDie();

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
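    // Continuing the example above (illustrative): with rz_size == 64,
    // user_beg == alloc_beg + 64 and chunk_beg == user_beg - 16 ==
    // alloc_beg + 48, so alloc_beg != chunk_beg and the kAllocBegMagic /
    // chunk_beg pair is written at alloc_beg -- exactly the layout sketched
    // in the comment near kAllocBegMagic.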
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }
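    // Example (illustrative, 8-byte granularity): for size == 100 the first
    // 96 bytes are unpoisoned outright and the shadow byte of the last granule
    // is set to 100 & 7 == 4, i.e. only the first 4 bytes of that granule are
    // addressable (unless poison_partial is disabled, in which case the whole
    // granule is unpoisoned).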

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    uptr class_id =
        Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
    thread_stats.malloced_by_size[class_id]++;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
                                   BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid a race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                        CHUNK_QUARANTINE, memory_order_acquire))
      ReportInvalidFree(ptr, old_chunk_state, stack);
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
  }
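
  // Note (added for clarity): if two threads free the same pointer, only one
  // compare-and-swap observes CHUNK_ALLOCATED and wins; the loser sees
  // CHUNK_QUARANTINE in old_chunk_state and ReportInvalidFree() reports the
  // call as a double free.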

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlag.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                       AllocType alloc_type) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }

    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in the user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);
    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                     m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                  AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    if (delete_size && flags()->new_delete_type_mismatch &&
        delete_size != m->UsedSize()) {
      ReportNewDeleteSizeMismatch(p, delete_size, stack);
    }
    ASAN_FREE_HOOK(ptr);
    // Must mark the chunk as quarantined before any changes to its metadata.
    AtomicallySetQuarantineFlag(m, ptr, stack);
    QuarantineChunk(m, ptr, stack, alloc_type);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), (void*)0);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report the racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
      return allocator.ReturnNullOrDie();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return 0;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }
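
  // Summary (added for clarity): three cases are handled above.  For secondary
  // (large) allocations the chunk address is read back from the allocator
  // metadata written in Allocate() (meta[1]).  For primary allocations whose
  // left redzone is larger than the header, the kAllocBegMagic / back-pointer
  // pair at alloc_beg is used.  Otherwise the ChunkHeader starts right at
  // alloc_beg.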

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = 0;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void PrintStats() {
    allocator.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

StackTrace AsanChunkView::GetAllocStack() {
  return GetStackTraceFromId(chunk_->alloc_context_id);
}

StackTrace AsanChunkView::GetFreeStack() {
  return GetStackTraceFromId(chunk_->free_context_id);
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.Initialize(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return instance.Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  instance.Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return instance.Calloc(nmemb, size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (p == 0)
    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    instance.Deallocate(p, 0, stack, FROM_MALLOC);
    return 0;
  }
  return instance.Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool exceeded) {
  instance.allocator.SetRssLimitIsExceeded(exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
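
// Usage note (illustrative): under ASan these interface functions report the
// user-requested size, e.g. __sanitizer_get_allocated_size(malloc(100)) is
// 100, and __sanitizer_get_ownership() returns true only for live heap
// pointers that point to the beginning of an allocation (AllocationSize()
// above returns 0 otherwise).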

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif