      1 //===-- asan_allocator.cc -------------------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of AddressSanitizer, an address sanity checker.
     11 //
      12 // Implementation of ASan's memory allocator, second version.
     13 // This variant uses the allocator from sanitizer_common, i.e. the one shared
     14 // with ThreadSanitizer and MemorySanitizer.
     15 //
     16 //===----------------------------------------------------------------------===//
     17 
     18 #include "asan_allocator.h"
     19 #include "asan_mapping.h"
     20 #include "asan_poisoning.h"
     21 #include "asan_report.h"
     22 #include "asan_stack.h"
     23 #include "asan_thread.h"
     24 #include "sanitizer_common/sanitizer_allocator_interface.h"
     25 #include "sanitizer_common/sanitizer_flags.h"
     26 #include "sanitizer_common/sanitizer_internal_defs.h"
     27 #include "sanitizer_common/sanitizer_list.h"
     28 #include "sanitizer_common/sanitizer_stackdepot.h"
     29 #include "sanitizer_common/sanitizer_quarantine.h"
     30 #include "lsan/lsan_common.h"
     31 
     32 namespace __asan {
     33 
     34 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
      35 // We use adaptive redzones: larger allocations get larger redzones.
     36 static u32 RZLog2Size(u32 rz_log) {
     37   CHECK_LT(rz_log, 8);
     38   return 16 << rz_log;
     39 }
     40 
     41 static u32 RZSize2Log(u32 rz_size) {
     42   CHECK_GE(rz_size, 16);
     43   CHECK_LE(rz_size, 2048);
     44   CHECK(IsPowerOfTwo(rz_size));
     45   u32 res = Log2(rz_size) - 4;
     46   CHECK_EQ(rz_size, RZLog2Size(res));
     47   return res;
     48 }
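
         // Worked example of the encoding above: rz_log values 0..7 map to redzone
         // sizes 16, 32, 64, 128, 256, 512, 1024, 2048, and the two functions are
         // inverses of each other, e.g. RZLog2Size(3) == 16 << 3 == 128 and
         // RZSize2Log(128) == Log2(128) - 4 == 7 - 4 == 3.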
     49 
     50 static AsanAllocator &get_allocator();
     51 
     52 // The memory chunk allocated from the underlying allocator looks like this:
     53 // L L L L L L H H U U U U U U R R
     54 //   L -- left redzone words (0 or more bytes)
     55 //   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
     56 //   U -- user memory.
     57 //   R -- right redzone (0 or more bytes)
     58 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
     59 // memory.
     60 
      61 // If the left redzone is greater than the ChunkHeader size, we store a magic
     62 // value in the first uptr word of the memory block and store the address of
     63 // ChunkBase in the next uptr.
     64 // M B L L L L L L L L L  H H U U U U U U
     65 //   |                    ^
     66 //   ---------------------|
     67 //   M -- magic value kAllocBegMagic
     68 //   B -- address of ChunkHeader pointing to the first 'H'
     69 static const uptr kAllocBegMagic = 0xCC6E96B9;
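
         // Concretely (a sketch with assumed numbers): with rz_log == 1 the left
         // redzone is 32 bytes, so the ChunkHeader occupies
         // [alloc_beg + 16, alloc_beg + 32) and user memory starts at alloc_beg + 32;
         // because the redzone is larger than the header, alloc_beg itself holds
         // kAllocBegMagic and the next uptr holds the address of the ChunkHeader.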
     70 
     71 struct ChunkHeader {
      72   // First 8 bytes.
     73   u32 chunk_state       : 8;  // Must be first.
     74   u32 alloc_tid         : 24;
     75 
     76   u32 free_tid          : 24;
     77   u32 from_memalign     : 1;
     78   u32 alloc_type        : 2;
     79   u32 rz_log            : 3;
     80   u32 lsan_tag          : 2;
      81   // Second 8 bytes.
     82   // This field is used for small sizes. For large sizes it is equal to
     83   // SizeClassMap::kMaxSize and the actual size is stored in the
     84   // SecondaryAllocator's metadata.
     85   u32 user_requested_size;
     86   u32 alloc_context_id;
     87 };
     88 
     89 struct ChunkBase : ChunkHeader {
     90   // Header2, intersects with user memory.
     91   u32 free_context_id;
     92 };
     93 
     94 static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
     95 static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
     96 COMPILER_CHECK(kChunkHeaderSize == 16);
     97 COMPILER_CHECK(kChunkHeader2Size <= 16);
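
         // The bit layout the checks above pin down: chunk_state(8) + alloc_tid(24)
         // and free_tid(24) + from_memalign(1) + alloc_type(2) + rz_log(3) +
         // lsan_tag(2) each pack into 32 bits, and user_requested_size(32) +
         // alloc_context_id(32) fill the second 8 bytes, for 16 bytes total -- so the
         // header always fits in the minimal 16-byte left redzone.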
     98 
     99 // Every chunk of memory allocated by this allocator can be in one of 3 states:
    100 // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
    101 // CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
    102 // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
    103 enum {
    104   CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
    105   CHUNK_ALLOCATED  = 2,
    106   CHUNK_QUARANTINE = 3
    107 };
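
         // The resulting lifecycle, as implemented below, is:
         //   CHUNK_AVAILABLE  -> CHUNK_ALLOCATED   in Allocator::Allocate(),
         //   CHUNK_ALLOCATED  -> CHUNK_QUARANTINE  in
         //                       AtomicallySetQuarantineFlagIfAllocated(),
         //   CHUNK_QUARANTINE -> CHUNK_AVAILABLE   in QuarantineCallback::Recycle().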
    108 
    109 struct AsanChunk: ChunkBase {
    110   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
    111   uptr UsedSize(bool locked_version = false) {
    112     if (user_requested_size != SizeClassMap::kMaxSize)
    113       return user_requested_size;
    114     return *reinterpret_cast<uptr *>(
    115                get_allocator().GetMetaData(AllocBeg(locked_version)));
    116   }
    117   void *AllocBeg(bool locked_version = false) {
    118     if (from_memalign) {
    119       if (locked_version)
    120         return get_allocator().GetBlockBeginFastLocked(
    121             reinterpret_cast<void *>(this));
    122       return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    123     }
    124     return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
    125   }
    126   bool AddrIsInside(uptr addr, bool locked_version = false) {
    127     return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
    128   }
    129 };
    130 
    131 struct QuarantineCallback {
    132   explicit QuarantineCallback(AllocatorCache *cache)
    133       : cache_(cache) {
    134   }
    135 
    136   void Recycle(AsanChunk *m) {
    137     CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    138     atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    139     CHECK_NE(m->alloc_tid, kInvalidTid);
    140     CHECK_NE(m->free_tid, kInvalidTid);
    141     PoisonShadow(m->Beg(),
    142                  RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
    143                  kAsanHeapLeftRedzoneMagic);
    144     void *p = reinterpret_cast<void *>(m->AllocBeg());
    145     if (p != m) {
    146       uptr *alloc_magic = reinterpret_cast<uptr *>(p);
    147       CHECK_EQ(alloc_magic[0], kAllocBegMagic);
    148       // Clear the magic value, as allocator internals may overwrite the
     149       // contents of the deallocated chunk, confusing the GetAsanChunk lookup.
    150       alloc_magic[0] = 0;
    151       CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    152     }
    153 
    154     // Statistics.
    155     AsanStats &thread_stats = GetCurrentThreadStats();
    156     thread_stats.real_frees++;
    157     thread_stats.really_freed += m->UsedSize();
    158 
    159     get_allocator().Deallocate(cache_, p);
    160   }
    161 
    162   void *Allocate(uptr size) {
    163     return get_allocator().Allocate(cache_, size, 1, false);
    164   }
    165 
    166   void Deallocate(void *p) {
    167     get_allocator().Deallocate(cache_, p);
    168   }
    169 
    170   AllocatorCache *cache_;
    171 };
    172 
    173 typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
    174 typedef AsanQuarantine::Cache QuarantineCache;
    175 
    176 void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
    177   PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    178   // Statistics.
    179   AsanStats &thread_stats = GetCurrentThreadStats();
    180   thread_stats.mmaps++;
    181   thread_stats.mmaped += size;
    182 }
    183 void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
    184   PoisonShadow(p, size, 0);
    185   // We are about to unmap a chunk of user memory.
    186   // Mark the corresponding shadow memory as not needed.
    187   FlushUnneededASanShadowMemory(p, size);
    188   // Statistics.
    189   AsanStats &thread_stats = GetCurrentThreadStats();
    190   thread_stats.munmaps++;
    191   thread_stats.munmaped += size;
    192 }
    193 
     194 // We cannot use THREADLOCAL because it is not supported on some of the
    195 // platforms we care about (OSX 10.6, Android).
    196 // static THREADLOCAL AllocatorCache cache;
    197 AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
    198   CHECK(ms);
    199   return &ms->allocator_cache;
    200 }
    201 
    202 QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
    203   CHECK(ms);
    204   CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
    205   return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
    206 }
    207 
    208 void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
    209   quarantine_size_mb = f->quarantine_size_mb;
    210   min_redzone = f->redzone;
    211   max_redzone = f->max_redzone;
    212   may_return_null = cf->allocator_may_return_null;
    213   alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
    214 }
    215 
    216 void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
    217   f->quarantine_size_mb = quarantine_size_mb;
    218   f->redzone = min_redzone;
    219   f->max_redzone = max_redzone;
    220   cf->allocator_may_return_null = may_return_null;
    221   f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
    222 }
    223 
    224 struct Allocator {
    225   static const uptr kMaxAllowedMallocSize =
    226       FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
    227   static const uptr kMaxThreadLocalQuarantine =
    228       FIRST_32_SECOND_64(1 << 18, 1 << 20);
    229 
    230   AsanAllocator allocator;
    231   AsanQuarantine quarantine;
    232   StaticSpinMutex fallback_mutex;
    233   AllocatorCache fallback_allocator_cache;
    234   QuarantineCache fallback_quarantine_cache;
    235 
    236   // ------------------- Options --------------------------
    237   atomic_uint16_t min_redzone;
    238   atomic_uint16_t max_redzone;
    239   atomic_uint8_t alloc_dealloc_mismatch;
    240 
    241   // ------------------- Initialization ------------------------
    242   explicit Allocator(LinkerInitialized)
    243       : quarantine(LINKER_INITIALIZED),
    244         fallback_quarantine_cache(LINKER_INITIALIZED) {}
    245 
    246   void CheckOptions(const AllocatorOptions &options) const {
    247     CHECK_GE(options.min_redzone, 16);
    248     CHECK_GE(options.max_redzone, options.min_redzone);
    249     CHECK_LE(options.max_redzone, 2048);
    250     CHECK(IsPowerOfTwo(options.min_redzone));
    251     CHECK(IsPowerOfTwo(options.max_redzone));
    252   }
    253 
    254   void SharedInitCode(const AllocatorOptions &options) {
    255     CheckOptions(options);
    256     quarantine.Init((uptr)options.quarantine_size_mb << 20,
    257                     kMaxThreadLocalQuarantine);
    258     atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
    259                  memory_order_release);
    260     atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    261     atomic_store(&max_redzone, options.max_redzone, memory_order_release);
    262   }
    263 
    264   void Initialize(const AllocatorOptions &options) {
    265     allocator.Init(options.may_return_null);
    266     SharedInitCode(options);
    267   }
    268 
    269   void ReInitialize(const AllocatorOptions &options) {
    270     allocator.SetMayReturnNull(options.may_return_null);
    271     SharedInitCode(options);
    272   }
    273 
    274   void GetOptions(AllocatorOptions *options) const {
    275     options->quarantine_size_mb = quarantine.GetSize() >> 20;
    276     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    277     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    278     options->may_return_null = allocator.MayReturnNull();
    279     options->alloc_dealloc_mismatch =
    280         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    281   }
    282 
    283   // -------------------- Helper methods. -------------------------
    284   uptr ComputeRZLog(uptr user_requested_size) {
    285     u32 rz_log =
    286       user_requested_size <= 64        - 16   ? 0 :
    287       user_requested_size <= 128       - 32   ? 1 :
    288       user_requested_size <= 512       - 64   ? 2 :
    289       user_requested_size <= 4096      - 128  ? 3 :
    290       user_requested_size <= (1 << 14) - 256  ? 4 :
    291       user_requested_size <= (1 << 15) - 512  ? 5 :
    292       user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    293     u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    294     u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    295     return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
    296   }
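
           // Worked example (assuming the default-like settings min_redzone == 16 and
           // max_redzone == 2048): for user_requested_size == 100 we have
           // 100 > 64 - 16 and 100 > 128 - 32 but 100 <= 512 - 64, so rz_log == 2 and
           // the redzone is RZLog2Size(2) == 64 bytes; the clamp Min(Max(2, 0), 7)
           // leaves that unchanged.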
    297 
    298   // We have an address between two chunks, and we want to report just one.
    299   AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
    300                          AsanChunk *right_chunk) {
     301     // Prefer an allocated chunk over a freed chunk, and a freed chunk
     302     // over an available chunk.
    303     if (left_chunk->chunk_state != right_chunk->chunk_state) {
    304       if (left_chunk->chunk_state == CHUNK_ALLOCATED)
    305         return left_chunk;
    306       if (right_chunk->chunk_state == CHUNK_ALLOCATED)
    307         return right_chunk;
    308       if (left_chunk->chunk_state == CHUNK_QUARANTINE)
    309         return left_chunk;
    310       if (right_chunk->chunk_state == CHUNK_QUARANTINE)
    311         return right_chunk;
    312     }
    313     // Same chunk_state: choose based on offset.
    314     sptr l_offset = 0, r_offset = 0;
    315     CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    316     CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    317     if (l_offset < r_offset)
    318       return left_chunk;
    319     return right_chunk;
    320   }
    321 
    322   // -------------------- Allocation/Deallocation routines ---------------
    323   void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
    324                  AllocType alloc_type, bool can_fill) {
    325     if (UNLIKELY(!asan_inited))
    326       AsanInitFromRtl();
    327     Flags &fl = *flags();
    328     CHECK(stack);
    329     const uptr min_alignment = SHADOW_GRANULARITY;
    330     if (alignment < min_alignment)
    331       alignment = min_alignment;
    332     if (size == 0) {
    333       // We'd be happy to avoid allocating memory for zero-size requests, but
    334       // some programs/tests depend on this behavior and assume that malloc
    335       // would not return NULL even for zero-size allocations. Moreover, it
    336       // looks like operator new should never return NULL, and results of
    337       // consecutive "new" calls must be different even if the allocated size
    338       // is zero.
    339       size = 1;
    340     }
    341     CHECK(IsPowerOfTwo(alignment));
    342     uptr rz_log = ComputeRZLog(size);
    343     uptr rz_size = RZLog2Size(rz_log);
    344     uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    345     uptr needed_size = rounded_size + rz_size;
    346     if (alignment > min_alignment)
    347       needed_size += alignment;
    348     bool using_primary_allocator = true;
    349     // If we are allocating from the secondary allocator, there will be no
    350     // automatic right redzone, so add the right redzone manually.
    351     if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    352       needed_size += rz_size;
    353       using_primary_allocator = false;
    354     }
    355     CHECK(IsAligned(needed_size, min_alignment));
    356     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    357       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
    358              (void*)size);
    359       return allocator.ReturnNullOrDie();
    360     }
    361 
    362     AsanThread *t = GetCurrentThread();
    363     void *allocated;
    364     bool check_rss_limit = true;
    365     if (t) {
    366       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    367       allocated =
    368           allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    369     } else {
    370       SpinMutexLock l(&fallback_mutex);
    371       AllocatorCache *cache = &fallback_allocator_cache;
    372       allocated =
    373           allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    374     }
    375 
    376     if (!allocated)
    377       return allocator.ReturnNullOrDie();
    378 
    379     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
    380       // Heap poisoning is enabled, but the allocator provides an unpoisoned
    381       // chunk. This is possible if CanPoisonMemory() was false for some
    382       // time, for example, due to flags()->start_disabled.
    383       // Anyway, poison the block before using it for anything else.
    384       uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    385       PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    386     }
    387 
    388     uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    389     uptr alloc_end = alloc_beg + needed_size;
    390     uptr beg_plus_redzone = alloc_beg + rz_size;
    391     uptr user_beg = beg_plus_redzone;
    392     if (!IsAligned(user_beg, alignment))
    393       user_beg = RoundUpTo(user_beg, alignment);
    394     uptr user_end = user_beg + size;
    395     CHECK_LE(user_end, alloc_end);
    396     uptr chunk_beg = user_beg - kChunkHeaderSize;
    397     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    398     m->alloc_type = alloc_type;
    399     m->rz_log = rz_log;
    400     u32 alloc_tid = t ? t->tid() : 0;
    401     m->alloc_tid = alloc_tid;
    402     CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    403     m->free_tid = kInvalidTid;
    404     m->from_memalign = user_beg != beg_plus_redzone;
    405     if (alloc_beg != chunk_beg) {
     406       CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    407       reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    408       reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    409     }
    410     if (using_primary_allocator) {
    411       CHECK(size);
    412       m->user_requested_size = size;
    413       CHECK(allocator.FromPrimary(allocated));
    414     } else {
    415       CHECK(!allocator.FromPrimary(allocated));
    416       m->user_requested_size = SizeClassMap::kMaxSize;
    417       uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    418       meta[0] = size;
    419       meta[1] = chunk_beg;
    420     }
    421 
    422     m->alloc_context_id = StackDepotPut(*stack);
    423 
    424     uptr size_rounded_down_to_granularity =
    425         RoundDownTo(size, SHADOW_GRANULARITY);
    426     // Unpoison the bulk of the memory region.
    427     if (size_rounded_down_to_granularity)
    428       PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    429     // Deal with the end of the region if size is not aligned to granularity.
    430     if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
    431       u8 *shadow =
    432           (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
    433       *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    434     }
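
             // For example (assuming the usual 8-byte shadow granularity): for
             // size == 13 the first 8 bytes were unpoisoned above, and the shadow byte
             // of the last granule becomes 13 & 7 == 5, i.e. only the first 5 bytes of
             // that granule are addressable (or all 8 if poison_partial is off).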
    435 
    436     AsanStats &thread_stats = GetCurrentThreadStats();
    437     thread_stats.mallocs++;
    438     thread_stats.malloced += size;
    439     thread_stats.malloced_redzones += needed_size - size;
    440     if (needed_size > SizeClassMap::kMaxSize)
    441       thread_stats.malloc_large++;
    442     else
    443       thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
    444 
    445     void *res = reinterpret_cast<void *>(user_beg);
    446     if (can_fill && fl.max_malloc_fill_size) {
    447       uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    448       REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    449     }
    450 #if CAN_SANITIZE_LEAKS
    451     m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
    452                                                  : __lsan::kDirectlyLeaked;
    453 #endif
    454     // Must be the last mutation of metadata in this function.
    455     atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    456     ASAN_MALLOC_HOOK(res, size);
    457     return res;
    458   }
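
           // End-to-end sketch of the layout math above, with assumed numbers and
           // default-like redzone settings: Allocate(100, 8, ...) gives rz_log == 2,
           // rz_size == 64, rounded_size == 104 and needed_size == 168. With alloc_beg
           // returned by the underlying allocator, user_beg == alloc_beg + 64,
           // chunk_beg == alloc_beg + 48, and the right redzone starts at
           // alloc_beg + 164; the header sits in [alloc_beg + 48, alloc_beg + 64) and
           // alloc_beg holds kAllocBegMagic followed by chunk_beg.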
    459 
     460   // Set the quarantine flag if the chunk is allocated; report an ASan error
     461   // on available and quarantined chunks. Return true on success, false otherwise.
    462   bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
    463                                    BufferedStackTrace *stack) {
    464     u8 old_chunk_state = CHUNK_ALLOCATED;
    465     // Flip the chunk_state atomically to avoid race on double-free.
    466     if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
    467                                         CHUNK_QUARANTINE,
    468                                         memory_order_acquire)) {
    469       ReportInvalidFree(ptr, old_chunk_state, stack);
    470       // It's not safe to push a chunk in quarantine on invalid free.
    471       return false;
    472     }
    473     CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    474     return true;
    475   }
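
           // For example, if two threads race to free the same pointer, both arrive
           // here with old_chunk_state == CHUNK_ALLOCATED; exactly one exchange
           // succeeds and quarantines the chunk, while the other observes
           // CHUNK_QUARANTINE and reports the double-free.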
    476 
    477   // Expects the chunk to already be marked as quarantined by using
    478   // AtomicallySetQuarantineFlagIfAllocated.
    479   void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
    480                        AllocType alloc_type) {
    481     CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    482     CHECK_GE(m->alloc_tid, 0);
     483     if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in the user area.
    484       CHECK_EQ(m->free_tid, kInvalidTid);
    485     AsanThread *t = GetCurrentThread();
    486     m->free_tid = t ? t->tid() : 0;
    487     m->free_context_id = StackDepotPut(*stack);
    488     // Poison the region.
    489     PoisonShadow(m->Beg(),
    490                  RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
    491                  kAsanHeapFreeMagic);
    492 
    493     AsanStats &thread_stats = GetCurrentThreadStats();
    494     thread_stats.frees++;
    495     thread_stats.freed += m->UsedSize();
    496 
    497     // Push into quarantine.
    498     if (t) {
    499       AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    500       AllocatorCache *ac = GetAllocatorCache(ms);
    501       quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
    502                            m->UsedSize());
    503     } else {
    504       SpinMutexLock l(&fallback_mutex);
    505       AllocatorCache *ac = &fallback_allocator_cache;
    506       quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
    507                            m->UsedSize());
    508     }
    509   }
    510 
    511   void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
    512                   AllocType alloc_type) {
    513     uptr p = reinterpret_cast<uptr>(ptr);
    514     if (p == 0) return;
    515 
    516     uptr chunk_beg = p - kChunkHeaderSize;
    517     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    518 
    519     ASAN_FREE_HOOK(ptr);
    520     // Must mark the chunk as quarantined before any changes to its metadata.
     521     // Do not quarantine the chunk if we failed to set the CHUNK_QUARANTINE flag.
    522     if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
    523 
    524     if (m->alloc_type != alloc_type) {
    525       if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
    526         ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
    527                                 (AllocType)alloc_type);
    528       }
    529     }
    530 
    531     if (delete_size && flags()->new_delete_type_mismatch &&
    532         delete_size != m->UsedSize()) {
    533       ReportNewDeleteSizeMismatch(p, m->UsedSize(), delete_size, stack);
    534     }
    535 
    536     QuarantineChunk(m, ptr, stack, alloc_type);
    537   }
    538 
    539   void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    540     CHECK(old_ptr && new_size);
    541     uptr p = reinterpret_cast<uptr>(old_ptr);
    542     uptr chunk_beg = p - kChunkHeaderSize;
    543     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    544 
    545     AsanStats &thread_stats = GetCurrentThreadStats();
    546     thread_stats.reallocs++;
    547     thread_stats.realloced += new_size;
    548 
    549     void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    550     if (new_ptr) {
    551       u8 chunk_state = m->chunk_state;
    552       if (chunk_state != CHUNK_ALLOCATED)
    553         ReportInvalidFree(old_ptr, chunk_state, stack);
    554       CHECK_NE(REAL(memcpy), nullptr);
    555       uptr memcpy_size = Min(new_size, m->UsedSize());
    556       // If realloc() races with free(), we may start copying freed memory.
    557       // However, we will report racy double-free later anyway.
    558       REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    559       Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    560     }
    561     return new_ptr;
    562   }
    563 
    564   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    565     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    566       return allocator.ReturnNullOrDie();
    567     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     568     // If the memory comes from the secondary allocator, there is no need to
     569     // clear it, as it comes directly from mmap.
    570     if (ptr && allocator.FromPrimary(ptr))
    571       REAL(memset)(ptr, 0, nmemb * size);
    572     return ptr;
    573   }
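
           // For example, on a 32-bit target calloc(0x20000000, 8) would wrap
           // nmemb * size around to 0; CallocShouldReturnNullDueToOverflow catches
           // that case first, so the call returns null (or dies, depending on
           // allocator_may_return_null) instead of returning an undersized block.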
    574 
    575   void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    576     if (chunk_state == CHUNK_QUARANTINE)
    577       ReportDoubleFree((uptr)ptr, stack);
    578     else
    579       ReportFreeNotMalloced((uptr)ptr, stack);
    580   }
    581 
    582   void CommitBack(AsanThreadLocalMallocStorage *ms) {
    583     AllocatorCache *ac = GetAllocatorCache(ms);
    584     quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    585     allocator.SwallowCache(ac);
    586   }
    587 
    588   // -------------------------- Chunk lookup ----------------------
    589 
    590   // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
    591   AsanChunk *GetAsanChunk(void *alloc_beg) {
    592     if (!alloc_beg) return nullptr;
    593     if (!allocator.FromPrimary(alloc_beg)) {
    594       uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    595       AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    596       return m;
    597     }
    598     uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    599     if (alloc_magic[0] == kAllocBegMagic)
    600       return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    601     return reinterpret_cast<AsanChunk *>(alloc_beg);
    602   }
    603 
    604   AsanChunk *GetAsanChunkByAddr(uptr p) {
    605     void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    606     return GetAsanChunk(alloc_beg);
    607   }
    608 
    609   // Allocator must be locked when this function is called.
    610   AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    611     void *alloc_beg =
    612         allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    613     return GetAsanChunk(alloc_beg);
    614   }
    615 
    616   uptr AllocationSize(uptr p) {
    617     AsanChunk *m = GetAsanChunkByAddr(p);
    618     if (!m) return 0;
    619     if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    620     if (m->Beg() != p) return 0;
    621     return m->UsedSize();
    622   }
    623 
    624   AsanChunkView FindHeapChunkByAddress(uptr addr) {
    625     AsanChunk *m1 = GetAsanChunkByAddr(addr);
    626     if (!m1) return AsanChunkView(m1);
    627     sptr offset = 0;
    628     if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    629       // The address is in the chunk's left redzone, so maybe it is actually
    630       // a right buffer overflow from the other chunk to the left.
    631       // Search a bit to the left to see if there is another chunk.
    632       AsanChunk *m2 = nullptr;
    633       for (uptr l = 1; l < GetPageSizeCached(); l++) {
    634         m2 = GetAsanChunkByAddr(addr - l);
    635         if (m2 == m1) continue;  // Still the same chunk.
    636         break;
    637       }
    638       if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
    639         m1 = ChooseChunk(addr, m2, m1);
    640     }
    641     return AsanChunkView(m1);
    642   }
    643 
    644   void PrintStats() {
    645     allocator.PrintStats();
    646   }
    647 
    648   void ForceLock() {
    649     allocator.ForceLock();
    650     fallback_mutex.Lock();
    651   }
    652 
    653   void ForceUnlock() {
    654     fallback_mutex.Unlock();
    655     allocator.ForceUnlock();
    656   }
    657 };
    658 
    659 static Allocator instance(LINKER_INITIALIZED);
    660 
    661 static AsanAllocator &get_allocator() {
    662   return instance.allocator;
    663 }
    664 
    665 bool AsanChunkView::IsValid() {
    666   return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
    667 }
    668 bool AsanChunkView::IsAllocated() {
    669   return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
    670 }
    671 uptr AsanChunkView::Beg() { return chunk_->Beg(); }
    672 uptr AsanChunkView::End() { return Beg() + UsedSize(); }
    673 uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
    674 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
    675 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
    676 
    677 static StackTrace GetStackTraceFromId(u32 id) {
    678   CHECK(id);
    679   StackTrace res = StackDepotGet(id);
    680   CHECK(res.trace);
    681   return res;
    682 }
    683 
    684 u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
    685 u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
    686 
    687 StackTrace AsanChunkView::GetAllocStack() {
    688   return GetStackTraceFromId(GetAllocStackId());
    689 }
    690 
    691 StackTrace AsanChunkView::GetFreeStack() {
    692   return GetStackTraceFromId(GetFreeStackId());
    693 }
    694 
    695 void InitializeAllocator(const AllocatorOptions &options) {
    696   instance.Initialize(options);
    697 }
    698 
    699 void ReInitializeAllocator(const AllocatorOptions &options) {
    700   instance.ReInitialize(options);
    701 }
    702 
    703 void GetAllocatorOptions(AllocatorOptions *options) {
    704   instance.GetOptions(options);
    705 }
    706 
    707 AsanChunkView FindHeapChunkByAddress(uptr addr) {
    708   return instance.FindHeapChunkByAddress(addr);
    709 }
    710 
    711 void AsanThreadLocalMallocStorage::CommitBack() {
    712   instance.CommitBack(this);
    713 }
    714 
    715 void PrintInternalAllocatorStats() {
    716   instance.PrintStats();
    717 }
    718 
    719 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
    720                     AllocType alloc_type) {
    721   return instance.Allocate(size, alignment, stack, alloc_type, true);
    722 }
    723 
    724 void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
    725   instance.Deallocate(ptr, 0, stack, alloc_type);
    726 }
    727 
    728 void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
    729                      AllocType alloc_type) {
    730   instance.Deallocate(ptr, size, stack, alloc_type);
    731 }
    732 
    733 void *asan_malloc(uptr size, BufferedStackTrace *stack) {
    734   return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
    735 }
    736 
    737 void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    738   return instance.Calloc(nmemb, size, stack);
    739 }
    740 
    741 void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
    742   if (!p)
    743     return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
    744   if (size == 0) {
    745     instance.Deallocate(p, 0, stack, FROM_MALLOC);
    746     return nullptr;
    747   }
    748   return instance.Reallocate(p, size, stack);
    749 }
    750 
    751 void *asan_valloc(uptr size, BufferedStackTrace *stack) {
    752   return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
    753 }
    754 
    755 void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
    756   uptr PageSize = GetPageSizeCached();
    757   size = RoundUpTo(size, PageSize);
    758   if (size == 0) {
    759     // pvalloc(0) should allocate one page.
    760     size = PageSize;
    761   }
    762   return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
    763 }
    764 
    765 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
    766                         BufferedStackTrace *stack) {
    767   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
    768   CHECK(IsAligned((uptr)ptr, alignment));
    769   *memptr = ptr;
    770   return 0;
    771 }
    772 
    773 uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
    774   if (!ptr) return 0;
    775   uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
    776   if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    777     GET_STACK_TRACE_FATAL(pc, bp);
    778     ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
    779   }
    780   return usable_size;
    781 }
    782 
    783 uptr asan_mz_size(const void *ptr) {
    784   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
    785 }
    786 
    787 void asan_mz_force_lock() {
    788   instance.ForceLock();
    789 }
    790 
    791 void asan_mz_force_unlock() {
    792   instance.ForceUnlock();
    793 }
    794 
    795 void AsanSoftRssLimitExceededCallback(bool exceeded) {
    796   instance.allocator.SetRssLimitIsExceeded(exceeded);
    797 }
    798 
    799 } // namespace __asan
    800 
    801 // --- Implementation of LSan-specific functions --- {{{1
    802 namespace __lsan {
    803 void LockAllocator() {
    804   __asan::get_allocator().ForceLock();
    805 }
    806 
    807 void UnlockAllocator() {
    808   __asan::get_allocator().ForceUnlock();
    809 }
    810 
    811 void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
    812   *begin = (uptr)&__asan::get_allocator();
    813   *end = *begin + sizeof(__asan::get_allocator());
    814 }
    815 
    816 uptr PointsIntoChunk(void* p) {
    817   uptr addr = reinterpret_cast<uptr>(p);
    818   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
    819   if (!m) return 0;
    820   uptr chunk = m->Beg();
    821   if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    822     return 0;
    823   if (m->AddrIsInside(addr, /*locked_version=*/true))
    824     return chunk;
    825   if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
    826                                   addr))
    827     return chunk;
    828   return 0;
    829 }
    830 
    831 uptr GetUserBegin(uptr chunk) {
    832   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
    833   CHECK(m);
    834   return m->Beg();
    835 }
    836 
    837 LsanMetadata::LsanMetadata(uptr chunk) {
    838   metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
    839 }
    840 
    841 bool LsanMetadata::allocated() const {
    842   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    843   return m->chunk_state == __asan::CHUNK_ALLOCATED;
    844 }
    845 
    846 ChunkTag LsanMetadata::tag() const {
    847   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    848   return static_cast<ChunkTag>(m->lsan_tag);
    849 }
    850 
    851 void LsanMetadata::set_tag(ChunkTag value) {
    852   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    853   m->lsan_tag = value;
    854 }
    855 
    856 uptr LsanMetadata::requested_size() const {
    857   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    858   return m->UsedSize(/*locked_version=*/true);
    859 }
    860 
    861 u32 LsanMetadata::stack_trace_id() const {
    862   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    863   return m->alloc_context_id;
    864 }
    865 
    866 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    867   __asan::get_allocator().ForEachChunk(callback, arg);
    868 }
    869 
    870 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
    871   uptr addr = reinterpret_cast<uptr>(p);
    872   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
    873   if (!m) return kIgnoreObjectInvalid;
    874   if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    875     if (m->lsan_tag == kIgnored)
    876       return kIgnoreObjectAlreadyIgnored;
    877     m->lsan_tag = __lsan::kIgnored;
    878     return kIgnoreObjectSuccess;
    879   } else {
    880     return kIgnoreObjectInvalid;
    881   }
    882 }
    883 }  // namespace __lsan
    884 
    885 // ---------------------- Interface ---------------- {{{1
    886 using namespace __asan;  // NOLINT
    887 
     888 // The ASan allocator doesn't reserve extra bytes, so normally we would just
     889 // return "size". We don't want to expose our redzone sizes, etc. here.
    890 uptr __sanitizer_get_estimated_allocated_size(uptr size) {
    891   return size;
    892 }
    893 
    894 int __sanitizer_get_ownership(const void *p) {
    895   uptr ptr = reinterpret_cast<uptr>(p);
    896   return instance.AllocationSize(ptr) > 0;
    897 }
    898 
    899 uptr __sanitizer_get_allocated_size(const void *p) {
    900   if (!p) return 0;
    901   uptr ptr = reinterpret_cast<uptr>(p);
    902   uptr allocated_size = instance.AllocationSize(ptr);
    903   // Die if p is not malloced or if it is already freed.
    904   if (allocated_size == 0) {
    905     GET_STACK_TRACE_FATAL_HERE;
    906     ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
    907   }
    908   return allocated_size;
    909 }
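
         // Usage sketch for the interface above (illustrative only, not part of this
         // file): a client built with ASan can query ownership and allocated size of
         // a heap pointer. The public prototypes live in
         // <sanitizer/allocator_interface.h>; treat this as a minimal, assumed example.
         #if 0
         #include <sanitizer/allocator_interface.h>
         #include <cstdio>
         #include <cstdlib>

         int main() {
           void *p = malloc(100);
           // Prints 1: p was allocated by the ASan allocator.
           printf("owned: %d\n", __sanitizer_get_ownership(p));
           // Prints the requested size (100); redzones are not counted.
           printf("size: %zu\n", __sanitizer_get_allocated_size(p));
           free(p);
           return 0;
         }
         #endif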
    910 
    911 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
     912 // Provide default (no-op) implementations of malloc hooks.
    913 extern "C" {
    914 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
    915 void __sanitizer_malloc_hook(void *ptr, uptr size) {
    916   (void)ptr;
    917   (void)size;
    918 }
    919 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
    920 void __sanitizer_free_hook(void *ptr) {
    921   (void)ptr;
    922 }
    923 } // extern "C"
    924 #endif
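
         // Usage sketch (illustrative only, not part of this file): because the hooks
         // above are weak, a client may provide strong definitions to observe
         // allocations and frees. The signatures below mirror the weak definitions
         // above; treat this as an assumed example rather than the canonical API.
         #if 0
         #include <cstdio>

         extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
           fprintf(stderr, "allocated %zu bytes at %p\n", size, ptr);
         }
         extern "C" void __sanitizer_free_hook(void *ptr) {
           fprintf(stderr, "about to free %p\n", ptr);
         }
         #endif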
    925