//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since ASan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
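    // Illustrative example (assuming 4K pages and SHADOW_GRANULARITY == 8):
    // a 64K user chunk maps to an 8K shadow range; if that range starts in
    // the middle of a page, the rounding below shrinks it to the fully
    // covered pages, and only those are returned to the OS.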
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace =  0xa0000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: larger allocations get larger redzones.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

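// Worked example: rz_log == 3 encodes a 16 << 3 == 128-byte redzone, and
// RZSize2Log(128) == Log2(128) - 4 == 3 recovers the encoding.
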
static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
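
// E.g. for a 100-byte request: 100 > 64-16 and 100 > 128-32, but
// 100 <= 512-64, so ComputeRZLog returns 2 and the redzone is 64 bytes
// (unless flags()->redzone requests a larger minimum).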

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};
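
// Illustrative: with rz_log == 2 (a 64-byte left redzone) the compressed
// alloc stack occupies (64 - 16) / sizeof(u32) == 12 u32 slots at the start
// of the redzone, just before the ChunkHeader.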

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  if (!asan_inited)
    __asan_init();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
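  // E.g. (assuming SHADOW_GRANULARITY == 8) a 100-byte malloc gets rz_size ==
  // 64 (rz_log == 2) and rounded_size == 104, so needed_size == 168; that
  // fits in the primary allocator, whose layout supplies the right redzone
  // automatically.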
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (flags()->use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
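  // E.g. (with SHADOW_GRANULARITY == 8) for size == 13: the first 8 bytes are
  // unpoisoned in bulk, and the trailing shadow byte is set to 13 & 7 == 5,
  // marking only the first 5 bytes of the last granule as addressable.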

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_relaxed);
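  // Note: this relies on chunk_state being the first 8 bits of ChunkHeader
  // ("Must be first" above), so casting m to atomic_uint8_t* addresses
  // exactly that field.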

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
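  // (Quarantined chunks stay poisoned and are not reused immediately, which
  // is what allows use-after-free on this memory to be detected.)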
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), (void*)0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
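  // E.g. if actual_size == 512 (with the default 16-byte minimum redzone),
  // rz_log == 2 matches: ComputeRZLog(512 - 64) == ComputeRZLog(448) == 2,
  // so the chunk header starts at alloc_beg + 64 - kChunkHeaderSize.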
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  // If the memory comes from the secondary allocator, there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
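
// These weak no-op hooks can be overridden by a client's own strong
// __asan_malloc_hook / __asan_free_hook definitions to observe every
// allocation and deallocation (they are invoked via the ASAN_MALLOC_HOOK /
// ASAN_FREE_HOOK calls in Allocate/Deallocate above).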


#endif  // ASAN_ALLOCATOR_VERSION