      1 //===-- asan_allocator.cc -------------------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of AddressSanitizer, an address sanity checker.
     11 //
      12 // Implementation of ASan's memory allocator (second version).
     13 // This variant uses the allocator from sanitizer_common, i.e. the one shared
     14 // with ThreadSanitizer and MemorySanitizer.
     15 //
     16 //===----------------------------------------------------------------------===//
     17 
     18 #include "asan_allocator.h"
     19 #include "asan_mapping.h"
     20 #include "asan_poisoning.h"
     21 #include "asan_report.h"
     22 #include "asan_stack.h"
     23 #include "asan_thread.h"
     24 #include "sanitizer_common/sanitizer_allocator_interface.h"
     25 #include "sanitizer_common/sanitizer_flags.h"
     26 #include "sanitizer_common/sanitizer_internal_defs.h"
     27 #include "sanitizer_common/sanitizer_list.h"
     28 #include "sanitizer_common/sanitizer_stackdepot.h"
     29 #include "sanitizer_common/sanitizer_quarantine.h"
     30 #include "lsan/lsan_common.h"
     31 
     32 namespace __asan {
     33 
     34 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
      35 // We use adaptive redzones: larger allocations get larger redzones.
     36 static u32 RZLog2Size(u32 rz_log) {
     37   CHECK_LT(rz_log, 8);
     38   return 16 << rz_log;
     39 }
     40 
     41 static u32 RZSize2Log(u32 rz_size) {
     42   CHECK_GE(rz_size, 16);
     43   CHECK_LE(rz_size, 2048);
     44   CHECK(IsPowerOfTwo(rz_size));
     45   u32 res = Log2(rz_size) - 4;
     46   CHECK_EQ(rz_size, RZLog2Size(res));
     47   return res;
     48 }
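
         // For reference (derived from the two helpers above): the 3-bit rz_log
         // encoding maps 0..7 to redzone sizes 16..2048, for example:
         //   CHECK_EQ(RZLog2Size(2), 64);
         //   CHECK_EQ(RZSize2Log(64), 2);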
     49 
     50 static AsanAllocator &get_allocator();
     51 
     52 // The memory chunk allocated from the underlying allocator looks like this:
     53 // L L L L L L H H U U U U U U R R
     54 //   L -- left redzone words (0 or more bytes)
     55 //   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
     56 //   U -- user memory.
     57 //   R -- right redzone (0 or more bytes)
     58 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
     59 // memory.
     60 
      61 // If the left redzone is greater than the ChunkHeader size, we store a
      62 // magic value in the first uptr word of the memory block and the address
      63 // of the ChunkBase in the next uptr.
     64 // M B L L L L L L L L L  H H U U U U U U
     65 //   |                    ^
     66 //   ---------------------|
     67 //   M -- magic value kAllocBegMagic
     68 //   B -- address of ChunkHeader pointing to the first 'H'
     69 static const uptr kAllocBegMagic = 0xCC6E96B9;
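
         // For example, with a 32-byte left redzone (and no extra alignment
         // padding for the user pointer), the chunk header starts at
         // alloc_beg + 16, so Allocate() below writes
         //   reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
         //   reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;  // alloc_beg + 16
         // With the minimal 16-byte redzone, chunk_beg == alloc_beg and no magic
         // is written.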
     70 
     71 struct ChunkHeader {
      72   // First 8 bytes.
     73   u32 chunk_state       : 8;  // Must be first.
     74   u32 alloc_tid         : 24;
     75 
     76   u32 free_tid          : 24;
     77   u32 from_memalign     : 1;
     78   u32 alloc_type        : 2;
     79   u32 rz_log            : 3;
     80   u32 lsan_tag          : 2;
      81   // Second 8 bytes.
     82   // This field is used for small sizes. For large sizes it is equal to
     83   // SizeClassMap::kMaxSize and the actual size is stored in the
     84   // SecondaryAllocator's metadata.
     85   u32 user_requested_size;
     86   u32 alloc_context_id;
     87 };
     88 
     89 struct ChunkBase : ChunkHeader {
     90   // Header2, intersects with user memory.
     91   u32 free_context_id;
     92 };
     93 
     94 static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
     95 static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
     96 COMPILER_CHECK(kChunkHeaderSize == 16);
     97 COMPILER_CHECK(kChunkHeader2Size <= 16);
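
         // The arithmetic behind these checks: the bitfields above pack into
         // 8 bytes (8 + 24 bits, then 24 + 1 + 2 + 3 + 2 bits), and the two u32
         // fields add another 8, so sizeof(ChunkHeader) == 16.  ChunkBase adds a
         // single u32, so kChunkHeader2Size == 4, well within the 16-byte limit.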
     98 
     99 // Every chunk of memory allocated by this allocator can be in one of 3 states:
    100 // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
    101 // CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
    102 // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
    103 enum {
    104   CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
    105   CHUNK_ALLOCATED  = 2,
    106   CHUNK_QUARANTINE = 3
    107 };
    108 
    109 struct AsanChunk: ChunkBase {
    110   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
    111   uptr UsedSize(bool locked_version = false) {
    112     if (user_requested_size != SizeClassMap::kMaxSize)
    113       return user_requested_size;
    114     return *reinterpret_cast<uptr *>(
    115                get_allocator().GetMetaData(AllocBeg(locked_version)));
    116   }
    117   void *AllocBeg(bool locked_version = false) {
    118     if (from_memalign) {
    119       if (locked_version)
    120         return get_allocator().GetBlockBeginFastLocked(
    121             reinterpret_cast<void *>(this));
    122       return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    123     }
    124     return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
    125   }
    126   bool AddrIsInside(uptr addr, bool locked_version = false) {
    127     return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
    128   }
    129 };
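
         // Note: for chunks served by the primary allocator UsedSize() comes
         // straight from user_requested_size; SizeClassMap::kMaxSize in that
         // field is a sentinel meaning "secondary allocation", in which case the
         // real size lives in the secondary allocator's metadata (Allocate()
         // below stores it there as meta[0]).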
    130 
    131 struct QuarantineCallback {
    132   explicit QuarantineCallback(AllocatorCache *cache)
    133       : cache_(cache) {
    134   }
    135 
    136   void Recycle(AsanChunk *m) {
    137     CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    138     atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    139     CHECK_NE(m->alloc_tid, kInvalidTid);
    140     CHECK_NE(m->free_tid, kInvalidTid);
    141     PoisonShadow(m->Beg(),
    142                  RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
    143                  kAsanHeapLeftRedzoneMagic);
    144     void *p = reinterpret_cast<void *>(m->AllocBeg());
    145     if (p != m) {
    146       uptr *alloc_magic = reinterpret_cast<uptr *>(p);
    147       CHECK_EQ(alloc_magic[0], kAllocBegMagic);
     148       // Clear the magic value, as allocator internals may overwrite the
     149       // contents of a deallocated chunk, confusing the GetAsanChunk lookup.
    150       alloc_magic[0] = 0;
    151       CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    152     }
    153 
    154     // Statistics.
    155     AsanStats &thread_stats = GetCurrentThreadStats();
    156     thread_stats.real_frees++;
    157     thread_stats.really_freed += m->UsedSize();
    158 
    159     get_allocator().Deallocate(cache_, p);
    160   }
    161 
    162   void *Allocate(uptr size) {
    163     return get_allocator().Allocate(cache_, size, 1, false);
    164   }
    165 
    166   void Deallocate(void *p) {
    167     get_allocator().Deallocate(cache_, p);
    168   }
    169 
    170   AllocatorCache *cache_;
    171 };
    172 
    173 typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
    174 typedef AsanQuarantine::Cache QuarantineCache;
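
         // Rough lifecycle of a freed chunk (a sketch of the code below):
         //   Deallocate()
         //     -> AtomicallySetQuarantineFlag()  // CHUNK_ALLOCATED -> CHUNK_QUARANTINE
         //     -> QuarantineChunk()              // poison shadow, record free stack
         //     -> quarantine.Put(...)            // hold the memory for a while
         //   ...later, when the quarantine exceeds its size limit...
         //   QuarantineCallback::Recycle()       // CHUNK_QUARANTINE -> CHUNK_AVAILABLE
         //     -> get_allocator().Deallocate()   // return memory to the allocator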
    175 
    176 void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
    177   PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    178   // Statistics.
    179   AsanStats &thread_stats = GetCurrentThreadStats();
    180   thread_stats.mmaps++;
    181   thread_stats.mmaped += size;
    182 }
    183 void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
    184   PoisonShadow(p, size, 0);
    185   // We are about to unmap a chunk of user memory.
    186   // Mark the corresponding shadow memory as not needed.
    187   FlushUnneededASanShadowMemory(p, size);
    188   // Statistics.
    189   AsanStats &thread_stats = GetCurrentThreadStats();
    190   thread_stats.munmaps++;
    191   thread_stats.munmaped += size;
    192 }
    193 
     194 // We cannot use THREADLOCAL because it is not supported on some of the
    195 // platforms we care about (OSX 10.6, Android).
    196 // static THREADLOCAL AllocatorCache cache;
    197 AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
    198   CHECK(ms);
    199   return &ms->allocator_cache;
    200 }
    201 
    202 QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
    203   CHECK(ms);
    204   CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
    205   return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
    206 }
    207 
    208 void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
    209   quarantine_size_mb = f->quarantine_size_mb;
    210   min_redzone = f->redzone;
    211   max_redzone = f->max_redzone;
    212   may_return_null = cf->allocator_may_return_null;
    213   alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
    214 }
    215 
    216 void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
    217   f->quarantine_size_mb = quarantine_size_mb;
    218   f->redzone = min_redzone;
    219   f->max_redzone = max_redzone;
    220   cf->allocator_may_return_null = may_return_null;
    221   f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
    222 }
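
         // These options mirror runtime flags, so, for example, running a
         // process with something like
         //   ASAN_OPTIONS=quarantine_size_mb=64:redzone=32:max_redzone=1024
         // ends up with quarantine_size_mb == 64, min_redzone == 32 and
         // max_redzone == 1024 here (the flag parsing itself happens elsewhere).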
    223 
    224 struct Allocator {
    225   static const uptr kMaxAllowedMallocSize =
    226       FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
    227   static const uptr kMaxThreadLocalQuarantine =
    228       FIRST_32_SECOND_64(1 << 18, 1 << 20);
    229 
    230   AsanAllocator allocator;
    231   AsanQuarantine quarantine;
    232   StaticSpinMutex fallback_mutex;
    233   AllocatorCache fallback_allocator_cache;
    234   QuarantineCache fallback_quarantine_cache;
    235 
    236   // ------------------- Options --------------------------
    237   atomic_uint16_t min_redzone;
    238   atomic_uint16_t max_redzone;
    239   atomic_uint8_t alloc_dealloc_mismatch;
    240 
    241   // ------------------- Initialization ------------------------
    242   explicit Allocator(LinkerInitialized)
    243       : quarantine(LINKER_INITIALIZED),
    244         fallback_quarantine_cache(LINKER_INITIALIZED) {}
    245 
    246   void CheckOptions(const AllocatorOptions &options) const {
    247     CHECK_GE(options.min_redzone, 16);
    248     CHECK_GE(options.max_redzone, options.min_redzone);
    249     CHECK_LE(options.max_redzone, 2048);
    250     CHECK(IsPowerOfTwo(options.min_redzone));
    251     CHECK(IsPowerOfTwo(options.max_redzone));
    252   }
    253 
    254   void SharedInitCode(const AllocatorOptions &options) {
    255     CheckOptions(options);
    256     quarantine.Init((uptr)options.quarantine_size_mb << 20,
    257                     kMaxThreadLocalQuarantine);
    258     atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
    259                  memory_order_release);
    260     atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    261     atomic_store(&max_redzone, options.max_redzone, memory_order_release);
    262   }
    263 
    264   void Initialize(const AllocatorOptions &options) {
    265     allocator.Init(options.may_return_null);
    266     SharedInitCode(options);
    267   }
    268 
    269   void ReInitialize(const AllocatorOptions &options) {
    270     allocator.SetMayReturnNull(options.may_return_null);
    271     SharedInitCode(options);
    272   }
    273 
    274   void GetOptions(AllocatorOptions *options) const {
    275     options->quarantine_size_mb = quarantine.GetSize() >> 20;
    276     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    277     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    278     options->may_return_null = allocator.MayReturnNull();
    279     options->alloc_dealloc_mismatch =
    280         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    281   }
    282 
    283   // -------------------- Helper methods. -------------------------
    284   uptr ComputeRZLog(uptr user_requested_size) {
    285     u32 rz_log =
    286       user_requested_size <= 64        - 16   ? 0 :
    287       user_requested_size <= 128       - 32   ? 1 :
    288       user_requested_size <= 512       - 64   ? 2 :
    289       user_requested_size <= 4096      - 128  ? 3 :
    290       user_requested_size <= (1 << 14) - 256  ? 4 :
    291       user_requested_size <= (1 << 15) - 512  ? 5 :
    292       user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    293     u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    294     u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    295     return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
    296   }
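
           // A few concrete values, assuming the default redzone bounds
           // (min_redzone = 16, max_redzone = 2048):
           //   ComputeRZLog(32)     == 0  ->   16-byte redzone
           //   ComputeRZLog(100)    == 2  ->   64-byte redzone
           //   ComputeRZLog(5000)   == 4  ->  256-byte redzone
           //   ComputeRZLog(100000) == 7  -> 2048-byte redzone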
    297 
    298   // We have an address between two chunks, and we want to report just one.
    299   AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
    300                          AsanChunk *right_chunk) {
     301     // Prefer an allocated chunk over a freed chunk, and a freed chunk
     302     // over an available chunk.
    303     if (left_chunk->chunk_state != right_chunk->chunk_state) {
    304       if (left_chunk->chunk_state == CHUNK_ALLOCATED)
    305         return left_chunk;
    306       if (right_chunk->chunk_state == CHUNK_ALLOCATED)
    307         return right_chunk;
    308       if (left_chunk->chunk_state == CHUNK_QUARANTINE)
    309         return left_chunk;
    310       if (right_chunk->chunk_state == CHUNK_QUARANTINE)
    311         return right_chunk;
    312     }
    313     // Same chunk_state: choose based on offset.
    314     sptr l_offset = 0, r_offset = 0;
    315     CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    316     CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    317     if (l_offset < r_offset)
    318       return left_chunk;
    319     return right_chunk;
    320   }
    321 
    322   // -------------------- Allocation/Deallocation routines ---------------
    323   void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
    324                  AllocType alloc_type, bool can_fill) {
    325     if (UNLIKELY(!asan_inited))
    326       AsanInitFromRtl();
    327     Flags &fl = *flags();
    328     CHECK(stack);
    329     const uptr min_alignment = SHADOW_GRANULARITY;
    330     if (alignment < min_alignment)
    331       alignment = min_alignment;
    332     if (size == 0) {
    333       // We'd be happy to avoid allocating memory for zero-size requests, but
    334       // some programs/tests depend on this behavior and assume that malloc
    335       // would not return NULL even for zero-size allocations. Moreover, it
    336       // looks like operator new should never return NULL, and results of
    337       // consecutive "new" calls must be different even if the allocated size
    338       // is zero.
    339       size = 1;
    340     }
    341     CHECK(IsPowerOfTwo(alignment));
    342     uptr rz_log = ComputeRZLog(size);
    343     uptr rz_size = RZLog2Size(rz_log);
    344     uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    345     uptr needed_size = rounded_size + rz_size;
    346     if (alignment > min_alignment)
    347       needed_size += alignment;
    348     bool using_primary_allocator = true;
    349     // If we are allocating from the secondary allocator, there will be no
    350     // automatic right redzone, so add the right redzone manually.
    351     if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    352       needed_size += rz_size;
    353       using_primary_allocator = false;
    354     }
    355     CHECK(IsAligned(needed_size, min_alignment));
    356     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    357       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
     358              size);
    359       return allocator.ReturnNullOrDie();
    360     }
    361 
    362     AsanThread *t = GetCurrentThread();
    363     void *allocated;
    364     bool check_rss_limit = true;
    365     if (t) {
    366       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    367       allocated =
    368           allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    369     } else {
    370       SpinMutexLock l(&fallback_mutex);
    371       AllocatorCache *cache = &fallback_allocator_cache;
    372       allocated =
    373           allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    374     }
    375 
    376     if (!allocated)
    377       return allocator.ReturnNullOrDie();
    378 
    379     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
    380       // Heap poisoning is enabled, but the allocator provides an unpoisoned
    381       // chunk. This is possible if CanPoisonMemory() was false for some
    382       // time, for example, due to flags()->start_disabled.
    383       // Anyway, poison the block before using it for anything else.
    384       uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    385       PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    386     }
    387 
    388     uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    389     uptr alloc_end = alloc_beg + needed_size;
    390     uptr beg_plus_redzone = alloc_beg + rz_size;
    391     uptr user_beg = beg_plus_redzone;
    392     if (!IsAligned(user_beg, alignment))
    393       user_beg = RoundUpTo(user_beg, alignment);
    394     uptr user_end = user_beg + size;
    395     CHECK_LE(user_end, alloc_end);
    396     uptr chunk_beg = user_beg - kChunkHeaderSize;
    397     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    398     m->alloc_type = alloc_type;
    399     m->rz_log = rz_log;
    400     u32 alloc_tid = t ? t->tid() : 0;
    401     m->alloc_tid = alloc_tid;
    402     CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    403     m->free_tid = kInvalidTid;
    404     m->from_memalign = user_beg != beg_plus_redzone;
    405     if (alloc_beg != chunk_beg) {
     406       CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    407       reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    408       reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    409     }
    410     if (using_primary_allocator) {
    411       CHECK(size);
    412       m->user_requested_size = size;
    413       CHECK(allocator.FromPrimary(allocated));
    414     } else {
    415       CHECK(!allocator.FromPrimary(allocated));
    416       m->user_requested_size = SizeClassMap::kMaxSize;
    417       uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    418       meta[0] = size;
    419       meta[1] = chunk_beg;
    420     }
    421 
    422     m->alloc_context_id = StackDepotPut(*stack);
    423 
    424     uptr size_rounded_down_to_granularity =
    425         RoundDownTo(size, SHADOW_GRANULARITY);
    426     // Unpoison the bulk of the memory region.
    427     if (size_rounded_down_to_granularity)
    428       PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    429     // Deal with the end of the region if size is not aligned to granularity.
    430     if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
    431       u8 *shadow =
    432           (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
    433       *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    434     }
    435 
    436     AsanStats &thread_stats = GetCurrentThreadStats();
    437     thread_stats.mallocs++;
    438     thread_stats.malloced += size;
    439     thread_stats.malloced_redzones += needed_size - size;
    440     if (needed_size > SizeClassMap::kMaxSize)
    441       thread_stats.malloc_large++;
    442     else
    443       thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
    444 
    445     void *res = reinterpret_cast<void *>(user_beg);
    446     if (can_fill && fl.max_malloc_fill_size) {
    447       uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    448       REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    449     }
    450 #if CAN_SANITIZE_LEAKS
    451     m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
    452                                                  : __lsan::kDirectlyLeaked;
    453 #endif
    454     // Must be the last mutation of metadata in this function.
    455     atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    456     ASAN_MALLOC_HOOK(res, size);
    457     return res;
    458   }
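
           // A worked example (default shadow granularity of 8, default redzone
           // flags, no special alignment): Allocate(100, 8, ...) computes
           // rz_log = 2, rz_size = 64, rounded_size = 104, needed_size = 168.
           // The primary allocator returns alloc_beg; then
           // user_beg = alloc_beg + 64, chunk_beg = alloc_beg + 48, and the
           // kAllocBegMagic pair is written at alloc_beg because
           // chunk_beg != alloc_beg.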
    459 
    460   void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
    461                                    BufferedStackTrace *stack) {
    462     u8 old_chunk_state = CHUNK_ALLOCATED;
    463     // Flip the chunk_state atomically to avoid race on double-free.
    464     if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
    465                                         CHUNK_QUARANTINE, memory_order_acquire))
    466       ReportInvalidFree(ptr, old_chunk_state, stack);
    467     CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    468   }
    469 
    470   // Expects the chunk to already be marked as quarantined by using
    471   // AtomicallySetQuarantineFlag.
    472   void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
    473                        AllocType alloc_type) {
    474     CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    475 
    476     if (m->alloc_type != alloc_type) {
    477       if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
    478         ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
    479                                 (AllocType)alloc_type);
    480       }
    481     }
    482 
    483     CHECK_GE(m->alloc_tid, 0);
     484     if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in user area.
    485       CHECK_EQ(m->free_tid, kInvalidTid);
    486     AsanThread *t = GetCurrentThread();
    487     m->free_tid = t ? t->tid() : 0;
    488     m->free_context_id = StackDepotPut(*stack);
    489     // Poison the region.
    490     PoisonShadow(m->Beg(),
    491                  RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
    492                  kAsanHeapFreeMagic);
    493 
    494     AsanStats &thread_stats = GetCurrentThreadStats();
    495     thread_stats.frees++;
    496     thread_stats.freed += m->UsedSize();
    497 
    498     // Push into quarantine.
    499     if (t) {
    500       AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    501       AllocatorCache *ac = GetAllocatorCache(ms);
    502       quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
    503                            m->UsedSize());
    504     } else {
    505       SpinMutexLock l(&fallback_mutex);
    506       AllocatorCache *ac = &fallback_allocator_cache;
    507       quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
    508                            m->UsedSize());
    509     }
    510   }
    511 
    512   void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
    513                   AllocType alloc_type) {
    514     uptr p = reinterpret_cast<uptr>(ptr);
    515     if (p == 0) return;
    516 
    517     uptr chunk_beg = p - kChunkHeaderSize;
    518     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    519     if (delete_size && flags()->new_delete_type_mismatch &&
    520         delete_size != m->UsedSize()) {
    521       ReportNewDeleteSizeMismatch(p, delete_size, stack);
    522     }
    523     ASAN_FREE_HOOK(ptr);
    524     // Must mark the chunk as quarantined before any changes to its metadata.
    525     AtomicallySetQuarantineFlag(m, ptr, stack);
    526     QuarantineChunk(m, ptr, stack, alloc_type);
    527   }
    528 
    529   void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    530     CHECK(old_ptr && new_size);
    531     uptr p = reinterpret_cast<uptr>(old_ptr);
    532     uptr chunk_beg = p - kChunkHeaderSize;
    533     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    534 
    535     AsanStats &thread_stats = GetCurrentThreadStats();
    536     thread_stats.reallocs++;
    537     thread_stats.realloced += new_size;
    538 
    539     void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    540     if (new_ptr) {
    541       u8 chunk_state = m->chunk_state;
    542       if (chunk_state != CHUNK_ALLOCATED)
    543         ReportInvalidFree(old_ptr, chunk_state, stack);
    544       CHECK_NE(REAL(memcpy), nullptr);
    545       uptr memcpy_size = Min(new_size, m->UsedSize());
    546       // If realloc() races with free(), we may start copying freed memory.
     547       // However, we will report the racy double-free later anyway.
    548       REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    549       Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    550     }
    551     return new_ptr;
    552   }
    553 
    554   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    555     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    556       return allocator.ReturnNullOrDie();
    557     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     558     // If the memory comes from the secondary allocator, there is no need
     559     // to clear it: it comes directly from mmap.
    560     if (ptr && allocator.FromPrimary(ptr))
    561       REAL(memset)(ptr, 0, nmemb * size);
    562     return ptr;
    563   }
    564 
    565   void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    566     if (chunk_state == CHUNK_QUARANTINE)
    567       ReportDoubleFree((uptr)ptr, stack);
    568     else
    569       ReportFreeNotMalloced((uptr)ptr, stack);
    570   }
    571 
    572   void CommitBack(AsanThreadLocalMallocStorage *ms) {
    573     AllocatorCache *ac = GetAllocatorCache(ms);
    574     quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    575     allocator.SwallowCache(ac);
    576   }
    577 
    578   // -------------------------- Chunk lookup ----------------------
    579 
    580   // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
    581   AsanChunk *GetAsanChunk(void *alloc_beg) {
    582     if (!alloc_beg) return nullptr;
    583     if (!allocator.FromPrimary(alloc_beg)) {
    584       uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    585       AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    586       return m;
    587     }
    588     uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    589     if (alloc_magic[0] == kAllocBegMagic)
    590       return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    591     return reinterpret_cast<AsanChunk *>(alloc_beg);
    592   }
    593 
    594   AsanChunk *GetAsanChunkByAddr(uptr p) {
    595     void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    596     return GetAsanChunk(alloc_beg);
    597   }
    598 
    599   // Allocator must be locked when this function is called.
    600   AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    601     void *alloc_beg =
    602         allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    603     return GetAsanChunk(alloc_beg);
    604   }
    605 
    606   uptr AllocationSize(uptr p) {
    607     AsanChunk *m = GetAsanChunkByAddr(p);
    608     if (!m) return 0;
    609     if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    610     if (m->Beg() != p) return 0;
    611     return m->UsedSize();
    612   }
    613 
    614   AsanChunkView FindHeapChunkByAddress(uptr addr) {
    615     AsanChunk *m1 = GetAsanChunkByAddr(addr);
    616     if (!m1) return AsanChunkView(m1);
    617     sptr offset = 0;
    618     if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    619       // The address is in the chunk's left redzone, so maybe it is actually
     620       // a right buffer overflow from another chunk to the left.
    621       // Search a bit to the left to see if there is another chunk.
    622       AsanChunk *m2 = nullptr;
    623       for (uptr l = 1; l < GetPageSizeCached(); l++) {
    624         m2 = GetAsanChunkByAddr(addr - l);
    625         if (m2 == m1) continue;  // Still the same chunk.
    626         break;
    627       }
    628       if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
    629         m1 = ChooseChunk(addr, m2, m1);
    630     }
    631     return AsanChunkView(m1);
    632   }
    633 
    634   void PrintStats() {
    635     allocator.PrintStats();
    636   }
    637 
    638   void ForceLock() {
    639     allocator.ForceLock();
    640     fallback_mutex.Lock();
    641   }
    642 
    643   void ForceUnlock() {
    644     fallback_mutex.Unlock();
    645     allocator.ForceUnlock();
    646   }
    647 };
    648 
    649 static Allocator instance(LINKER_INITIALIZED);
    650 
    651 static AsanAllocator &get_allocator() {
    652   return instance.allocator;
    653 }
    654 
    655 bool AsanChunkView::IsValid() {
    656   return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
    657 }
    658 uptr AsanChunkView::Beg() { return chunk_->Beg(); }
    659 uptr AsanChunkView::End() { return Beg() + UsedSize(); }
    660 uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
    661 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
    662 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
    663 
    664 static StackTrace GetStackTraceFromId(u32 id) {
    665   CHECK(id);
    666   StackTrace res = StackDepotGet(id);
    667   CHECK(res.trace);
    668   return res;
    669 }
    670 
    671 StackTrace AsanChunkView::GetAllocStack() {
    672   return GetStackTraceFromId(chunk_->alloc_context_id);
    673 }
    674 
    675 StackTrace AsanChunkView::GetFreeStack() {
    676   return GetStackTraceFromId(chunk_->free_context_id);
    677 }
    678 
    679 void InitializeAllocator(const AllocatorOptions &options) {
    680   instance.Initialize(options);
    681 }
    682 
    683 void ReInitializeAllocator(const AllocatorOptions &options) {
    684   instance.ReInitialize(options);
    685 }
    686 
    687 void GetAllocatorOptions(AllocatorOptions *options) {
    688   instance.GetOptions(options);
    689 }
    690 
    691 AsanChunkView FindHeapChunkByAddress(uptr addr) {
    692   return instance.FindHeapChunkByAddress(addr);
    693 }
    694 
    695 void AsanThreadLocalMallocStorage::CommitBack() {
    696   instance.CommitBack(this);
    697 }
    698 
    699 void PrintInternalAllocatorStats() {
    700   instance.PrintStats();
    701 }
    702 
    703 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
    704                     AllocType alloc_type) {
    705   return instance.Allocate(size, alignment, stack, alloc_type, true);
    706 }
    707 
    708 void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
    709   instance.Deallocate(ptr, 0, stack, alloc_type);
    710 }
    711 
    712 void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
    713                      AllocType alloc_type) {
    714   instance.Deallocate(ptr, size, stack, alloc_type);
    715 }
    716 
    717 void *asan_malloc(uptr size, BufferedStackTrace *stack) {
    718   return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
    719 }
    720 
    721 void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    722   return instance.Calloc(nmemb, size, stack);
    723 }
    724 
    725 void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
    726   if (!p)
    727     return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
    728   if (size == 0) {
    729     instance.Deallocate(p, 0, stack, FROM_MALLOC);
    730     return nullptr;
    731   }
    732   return instance.Reallocate(p, size, stack);
    733 }
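
         // So, mirroring the usual libc contract:
         //   void *p = asan_realloc(nullptr, 100, stack);  // behaves like malloc(100)
         //   p = asan_realloc(p, 200, stack);              // copies, then frees the old block
         //   p = asan_realloc(p, 0, stack);                // frees p, returns nullptr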
    734 
    735 void *asan_valloc(uptr size, BufferedStackTrace *stack) {
    736   return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
    737 }
    738 
    739 void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
    740   uptr PageSize = GetPageSizeCached();
    741   size = RoundUpTo(size, PageSize);
    742   if (size == 0) {
    743     // pvalloc(0) should allocate one page.
    744     size = PageSize;
    745   }
    746   return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
    747 }
    748 
    749 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
    750                         BufferedStackTrace *stack) {
    751   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
    752   CHECK(IsAligned((uptr)ptr, alignment));
    753   *memptr = ptr;
    754   return 0;
    755 }
    756 
    757 uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
    758   if (!ptr) return 0;
    759   uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
    760   if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    761     GET_STACK_TRACE_FATAL(pc, bp);
    762     ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
    763   }
    764   return usable_size;
    765 }
    766 
    767 uptr asan_mz_size(const void *ptr) {
    768   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
    769 }
    770 
    771 void asan_mz_force_lock() {
    772   instance.ForceLock();
    773 }
    774 
    775 void asan_mz_force_unlock() {
    776   instance.ForceUnlock();
    777 }
    778 
    779 void AsanSoftRssLimitExceededCallback(bool exceeded) {
    780   instance.allocator.SetRssLimitIsExceeded(exceeded);
    781 }
    782 
    783 } // namespace __asan
    784 
    785 // --- Implementation of LSan-specific functions --- {{{1
    786 namespace __lsan {
    787 void LockAllocator() {
    788   __asan::get_allocator().ForceLock();
    789 }
    790 
    791 void UnlockAllocator() {
    792   __asan::get_allocator().ForceUnlock();
    793 }
    794 
    795 void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
    796   *begin = (uptr)&__asan::get_allocator();
    797   *end = *begin + sizeof(__asan::get_allocator());
    798 }
    799 
    800 uptr PointsIntoChunk(void* p) {
    801   uptr addr = reinterpret_cast<uptr>(p);
    802   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
    803   if (!m) return 0;
    804   uptr chunk = m->Beg();
    805   if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    806     return 0;
    807   if (m->AddrIsInside(addr, /*locked_version=*/true))
    808     return chunk;
    809   if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
    810                                   addr))
    811     return chunk;
    812   return 0;
    813 }
    814 
    815 uptr GetUserBegin(uptr chunk) {
    816   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
    817   CHECK(m);
    818   return m->Beg();
    819 }
    820 
    821 LsanMetadata::LsanMetadata(uptr chunk) {
    822   metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
    823 }
    824 
    825 bool LsanMetadata::allocated() const {
    826   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    827   return m->chunk_state == __asan::CHUNK_ALLOCATED;
    828 }
    829 
    830 ChunkTag LsanMetadata::tag() const {
    831   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    832   return static_cast<ChunkTag>(m->lsan_tag);
    833 }
    834 
    835 void LsanMetadata::set_tag(ChunkTag value) {
    836   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    837   m->lsan_tag = value;
    838 }
    839 
    840 uptr LsanMetadata::requested_size() const {
    841   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    842   return m->UsedSize(/*locked_version=*/true);
    843 }
    844 
    845 u32 LsanMetadata::stack_trace_id() const {
    846   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
    847   return m->alloc_context_id;
    848 }
    849 
    850 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    851   __asan::get_allocator().ForEachChunk(callback, arg);
    852 }
    853 
    854 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
    855   uptr addr = reinterpret_cast<uptr>(p);
    856   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
    857   if (!m) return kIgnoreObjectInvalid;
    858   if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    859     if (m->lsan_tag == kIgnored)
    860       return kIgnoreObjectAlreadyIgnored;
    861     m->lsan_tag = __lsan::kIgnored;
    862     return kIgnoreObjectSuccess;
    863   } else {
    864     return kIgnoreObjectInvalid;
    865   }
    866 }
    867 }  // namespace __lsan
    868 
    869 // ---------------------- Interface ---------------- {{{1
    870 using namespace __asan;  // NOLINT
    871 
    872 // ASan allocator doesn't reserve extra bytes, so normally we would
     873 // just return "size". We don't want to expose our redzone sizes, etc. here.
    874 uptr __sanitizer_get_estimated_allocated_size(uptr size) {
    875   return size;
    876 }
    877 
    878 int __sanitizer_get_ownership(const void *p) {
    879   uptr ptr = reinterpret_cast<uptr>(p);
    880   return instance.AllocationSize(ptr) > 0;
    881 }
    882 
    883 uptr __sanitizer_get_allocated_size(const void *p) {
    884   if (!p) return 0;
    885   uptr ptr = reinterpret_cast<uptr>(p);
    886   uptr allocated_size = instance.AllocationSize(ptr);
    887   // Die if p is not malloced or if it is already freed.
    888   if (allocated_size == 0) {
    889     GET_STACK_TRACE_FATAL_HERE;
    890     ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
    891   }
    892   return allocated_size;
    893 }
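
         // Example of how a client might use these entry points (they are
         // declared in the public sanitizer interface headers):
         //   void *p = malloc(100);
         //   __sanitizer_get_ownership(p);       // returns 1
         //   __sanitizer_get_allocated_size(p);  // returns 100
         //   free(p);
         //   __sanitizer_get_ownership(p);       // returns 0: the chunk is quarantined
         //   __sanitizer_get_allocated_size(p);  // reports an error (see above)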
    894 
    895 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
     896 // Provide default (no-op) implementations of malloc hooks.
    897 extern "C" {
    898 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
    899 void __sanitizer_malloc_hook(void *ptr, uptr size) {
    900   (void)ptr;
    901   (void)size;
    902 }
    903 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
    904 void __sanitizer_free_hook(void *ptr) {
    905   (void)ptr;
    906 }
    907 } // extern "C"
    908 #endif
    909