      1 //===-- asan_allocator.cc -------------------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of AddressSanitizer, an address sanity checker.
     11 //
     12 // Implementation of ASan's memory allocator.
      13 // Every piece of memory (AsanChunk) allocated by the allocator
      14 // has a left redzone of REDZONE bytes and
      15 // a right redzone sized so that the end of the chunk is aligned to REDZONE
      16 // (i.e. the right redzone is between 0 and REDZONE-1 bytes).
     17 // The left redzone is always poisoned.
     18 // The right redzone is poisoned on malloc, the body is poisoned on free.
      19 // Once freed, a chunk is moved to a quarantine (FIFO list).
     20 // After quarantine, a chunk is returned to freelists.
     21 //
     22 // The left redzone contains ASan's internal data and the stack trace of
     23 // the malloc call.
     24 // Once freed, the body of the chunk contains the stack trace of the free call.
     25 //
     26 //===----------------------------------------------------------------------===//
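        // For illustration (assuming REDZONE == 32, shadow granularity 8, and a
        // 64-bit target), a malloc(13) call is served from the 64-byte size class:
        //   [m, m+32)    left redzone: ChunkBase + compressed alloc stack (poisoned)
        //   [m+32, m+45) the 13 user bytes (addressable); the user pointer is m+32
        //   [m+45, m+64) right redzone (poisoned, modulo shadow granularity)
        // so the chunk ends at m+64, which is aligned to REDZONE.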
     27 #include "asan_allocator.h"
     28 
     29 #if ASAN_ALLOCATOR_VERSION == 1
     30 #include "asan_interceptors.h"
     31 #include "asan_internal.h"
     32 #include "asan_mapping.h"
     33 #include "asan_stats.h"
     34 #include "asan_report.h"
     35 #include "asan_thread.h"
     36 #include "asan_thread_registry.h"
     37 #include "sanitizer_common/sanitizer_allocator.h"
     38 #include "sanitizer_common/sanitizer_atomic.h"
     39 #include "sanitizer_common/sanitizer_mutex.h"
     40 
     41 namespace __asan {
     42 
     43 #define REDZONE ((uptr)(flags()->redzone))
     44 static const uptr kMinAllocSize = REDZONE * 2;
     45 static const u64 kMaxAvailableRam = 128ULL << 30;  // 128G
     46 static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M
     47 
     48 static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
     49 static const uptr kMaxSizeForThreadLocalFreeList =
     50     (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;
     51 
     52 // Size classes less than kMallocSizeClassStep are powers of two.
     53 // All other size classes are multiples of kMallocSizeClassStep.
     54 static const uptr kMallocSizeClassStepLog = 26;
     55 static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
     56 
     57 static const uptr kMaxAllowedMallocSize =
     58     (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
     59 
     60 static inline uptr SizeClassToSize(u8 size_class) {
     61   CHECK(size_class < kNumberOfSizeClasses);
     62   if (size_class <= kMallocSizeClassStepLog) {
     63     return 1UL << size_class;
     64   } else {
     65     return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
     66   }
     67 }
     68 
     69 static inline u8 SizeToSizeClass(uptr size) {
     70   u8 res = 0;
     71   if (size <= kMallocSizeClassStep) {
     72     uptr rounded = RoundUpToPowerOfTwo(size);
     73     res = Log2(rounded);
     74   } else {
     75     res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
     76         + kMallocSizeClassStepLog;
     77   }
     78   CHECK(res < kNumberOfSizeClasses);
     79   CHECK(size <= SizeClassToSize(res));
     80   return res;
     81 }
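        // A few sample mappings, given kMallocSizeClassStepLog == 26 above
        // (M == 1 << 20):
        //   SizeToSizeClass(1)       ==  0,  SizeClassToSize(0)  == 1
        //   SizeToSizeClass(100)     ==  7,  SizeClassToSize(7)  == 128
        //   SizeToSizeClass(64 * M)  == 26,  SizeClassToSize(26) == 64 * M
        //   SizeToSizeClass(100 * M) == 28,  SizeClassToSize(28) == 128 * M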
     82 
     83 // Given REDZONE bytes at 'mem', mark the first 'size' bytes as addressable
     84 // and the remaining REDZONE-size bytes as unaddressable.
     85 static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
     86   CHECK(size <= REDZONE);
     87   CHECK(IsAligned(mem, REDZONE));
     88   CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
     89   CHECK(IsPowerOfTwo(REDZONE));
     90   CHECK(REDZONE >= SHADOW_GRANULARITY);
     91   PoisonShadowPartialRightRedzone(mem, size, REDZONE,
     92                                   kAsanHeapRightRedzoneMagic);
     93 }
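        // For example, with REDZONE == 32, SHADOW_GRANULARITY == 8 and size == 13,
        // the four shadow bytes covering [mem, mem+32) become (a sketch of the
        // intended encoding): 0x00 (all 8 bytes addressable), 0x05 (first 5 bytes
        // addressable), kAsanHeapRightRedzoneMagic, kAsanHeapRightRedzoneMagic.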
     94 
     95 static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
     96   CHECK(IsAligned(size, GetPageSizeCached()));
     97   u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
     98   PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
     99   if (flags()->debug) {
    100     Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
    101   }
    102   return res;
    103 }
    104 
    105 // Every chunk of memory allocated by this allocator can be in one of 3 states:
    106 // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
    107 // CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
    108 // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
    109 //
     110 // The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
     111 // the beginning of an AsanChunk (in which case the actual chunk starts at
     112 // this - this->used_size).
    113 //
    114 // The magic numbers for the enum values are taken randomly.
    115 enum {
    116   CHUNK_AVAILABLE  = 0x57,
    117   CHUNK_ALLOCATED  = 0x32,
    118   CHUNK_QUARANTINE = 0x19,
    119   CHUNK_MEMALIGN   = 0xDC
    120 };
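        // State transitions (see Allocate(), Deallocate() and QuarantinePop() below):
        //   Allocate():      CHUNK_AVAILABLE  -> CHUNK_ALLOCATED
        //   Deallocate():    CHUNK_ALLOCATED  -> CHUNK_QUARANTINE  (atomic exchange,
        //                                        so a racing double-free is caught)
        //   QuarantinePop(): CHUNK_QUARANTINE -> CHUNK_AVAILABLE   (back on a free list)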
    121 
    122 struct ChunkBase {
    123   // First 8 bytes.
    124   uptr  chunk_state : 8;
    125   uptr  alloc_tid   : 24;
    126   uptr  size_class  : 8;
    127   uptr  free_tid    : 24;
    128 
    129   // Second 8 bytes.
    130   uptr alignment_log : 8;
    131   uptr alloc_type    : 2;
    132   uptr used_size : FIRST_32_SECOND_64(32, 54);  // Size requested by the user.
    133 
    134   // This field may overlap with the user area and thus should not
    135   // be used while the chunk is in CHUNK_ALLOCATED state.
    136   AsanChunk *next;
    137 
    138   // Typically the beginning of the user-accessible memory is 'this'+REDZONE
    139   // and is also aligned by REDZONE. However, if the memory is allocated
    140   // by memalign, the alignment might be higher and the user-accessible memory
    141   // starts at the first properly aligned address after 'this'.
    142   uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
    143   uptr Size() { return SizeClassToSize(size_class); }
    144   u8 SizeClass() { return size_class; }
    145 };
    146 
    147 struct AsanChunk: public ChunkBase {
    148   u32 *compressed_alloc_stack() {
    149     return (u32*)((uptr)this + sizeof(ChunkBase));
    150   }
    151   u32 *compressed_free_stack() {
    152     return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
    153   }
    154 
    155   // The left redzone after the ChunkBase is given to the alloc stack trace.
    156   uptr compressed_alloc_stack_size() {
    157     if (REDZONE < sizeof(ChunkBase)) return 0;
    158     return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
    159   }
    160   uptr compressed_free_stack_size() {
    161     if (REDZONE < sizeof(ChunkBase)) return 0;
    162     return (REDZONE) / sizeof(u32);
    163   }
    164 };
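        // Note on the layout: compressed_alloc_stack() lives in the left redzone
        // right after the ChunkBase, while compressed_free_stack() starts at
        // Max(REDZONE, sizeof(ChunkBase)) from 'this' -- in the usual case
        // REDZONE >= sizeof(ChunkBase) that is the user body itself, which is why
        // the free stack is only written once the chunk has been freed and its
        // body poisoned.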
    165 
    166 uptr AsanChunkView::Beg() { return chunk_->Beg(); }
    167 uptr AsanChunkView::End() { return Beg() + UsedSize(); }
    168 uptr AsanChunkView::UsedSize() { return chunk_->used_size; }
    169 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
    170 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
    171 
    172 void AsanChunkView::GetAllocStack(StackTrace *stack) {
    173   StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(),
    174                               chunk_->compressed_alloc_stack_size());
    175 }
    176 
    177 void AsanChunkView::GetFreeStack(StackTrace *stack) {
    178   StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(),
    179                               chunk_->compressed_free_stack_size());
    180 }
    181 
    182 static AsanChunk *PtrToChunk(uptr ptr) {
    183   AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
    184   if (m->chunk_state == CHUNK_MEMALIGN) {
    185     m = (AsanChunk*)((uptr)m - m->used_size);
    186   }
    187   return m;
    188 }
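        // Example of the memalign case above (assuming REDZONE == 32): for
        // memalign(128, n), Allocate() keeps the real header at 'm', rounds the user
        // address up to a 128-aligned 'addr' (when m + REDZONE is not already
        // aligned), and writes a stub header at addr - REDZONE with
        // chunk_state == CHUNK_MEMALIGN and used_size == (addr - REDZONE) - (uptr)m,
        // so PtrToChunk(addr) can hop back to 'm'.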
    189 
    190 void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
    191   CHECK(q->size() > 0);
    192   size_ += q->size();
    193   append_back(q);
    194   q->clear();
    195 }
    196 
    197 void AsanChunkFifoList::Push(AsanChunk *n) {
    198   push_back(n);
    199   size_ += n->Size();
    200 }
    201 
     202 // Interesting performance observation: this function takes up to 15% of
     203 // overall allocator time. That's because *first_ was evicted from the cache
     204 // long ago. Not sure whether we can or want to do anything about this.
    205 AsanChunk *AsanChunkFifoList::Pop() {
    206   CHECK(first_);
    207   AsanChunk *res = front();
    208   size_ -= res->Size();
    209   pop_front();
    210   return res;
    211 }
    212 
    213 // All pages we ever allocated.
    214 struct PageGroup {
    215   uptr beg;
    216   uptr end;
    217   uptr size_of_chunk;
    218   uptr last_chunk;
    219   bool InRange(uptr addr) {
    220     return addr >= beg && addr < end;
    221   }
    222 };
    223 
    224 class MallocInfo {
    225  public:
    226   explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
    227 
    228   AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
    229     AsanChunk *m = 0;
    230     AsanChunk **fl = &free_lists_[size_class];
    231     {
    232       BlockingMutexLock lock(&mu_);
    233       for (uptr i = 0; i < n_chunks; i++) {
    234         if (!(*fl)) {
    235           *fl = GetNewChunks(size_class);
    236         }
    237         AsanChunk *t = *fl;
    238         *fl = t->next;
    239         t->next = m;
    240         CHECK(t->chunk_state == CHUNK_AVAILABLE);
    241         m = t;
    242       }
    243     }
    244     return m;
    245   }
    246 
    247   void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
    248                                        bool eat_free_lists) {
    249     CHECK(flags()->quarantine_size > 0);
    250     BlockingMutexLock lock(&mu_);
    251     AsanChunkFifoList *q = &x->quarantine_;
    252     if (q->size() > 0) {
    253       quarantine_.PushList(q);
    254       while (quarantine_.size() > (uptr)flags()->quarantine_size) {
    255         QuarantinePop();
    256       }
    257     }
    258     if (eat_free_lists) {
    259       for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
    260            size_class++) {
    261         AsanChunk *m = x->free_lists_[size_class];
    262         while (m) {
    263           AsanChunk *t = m->next;
    264           m->next = free_lists_[size_class];
    265           free_lists_[size_class] = m;
    266           m = t;
    267         }
    268         x->free_lists_[size_class] = 0;
    269       }
    270     }
    271   }
    272 
    273   void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    274     BlockingMutexLock lock(&mu_);
    275     quarantine_.Push(chunk);
    276   }
    277 
    278   AsanChunk *FindChunkByAddr(uptr addr) {
    279     BlockingMutexLock lock(&mu_);
    280     return FindChunkByAddrUnlocked(addr);
    281   }
    282 
    283   uptr AllocationSize(uptr ptr) {
    284     if (!ptr) return 0;
    285     BlockingMutexLock lock(&mu_);
    286 
    287     // Make sure this is our chunk and |ptr| actually points to the beginning
    288     // of the allocated memory.
    289     AsanChunk *m = FindChunkByAddrUnlocked(ptr);
    290     if (!m || m->Beg() != ptr) return 0;
    291 
    292     if (m->chunk_state == CHUNK_ALLOCATED) {
    293       return m->used_size;
    294     } else {
    295       return 0;
    296     }
    297   }
    298 
    299   void ForceLock() {
    300     mu_.Lock();
    301   }
    302 
    303   void ForceUnlock() {
    304     mu_.Unlock();
    305   }
    306 
    307   void PrintStatus() {
    308     BlockingMutexLock lock(&mu_);
    309     uptr malloced = 0;
    310 
    311     Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
    312            quarantine_.size() >> 20, malloced >> 20);
    313     for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
    314       AsanChunk *i = free_lists_[j];
    315       if (!i) continue;
    316       uptr t = 0;
    317       for (; i; i = i->next) {
    318         t += i->Size();
    319       }
    320       Printf("%zu:%zu ", j, t >> 20);
    321     }
    322     Printf("\n");
    323   }
    324 
    325   PageGroup *FindPageGroup(uptr addr) {
    326     BlockingMutexLock lock(&mu_);
    327     return FindPageGroupUnlocked(addr);
    328   }
    329 
    330  private:
    331   PageGroup *FindPageGroupUnlocked(uptr addr) {
    332     int n = atomic_load(&n_page_groups_, memory_order_relaxed);
    333     // If the page groups are not sorted yet, sort them.
    334     if (n_sorted_page_groups_ < n) {
    335       SortArray((uptr*)page_groups_, n);
    336       n_sorted_page_groups_ = n;
    337     }
    338     // Binary search over the page groups.
    339     int beg = 0, end = n;
    340     while (beg < end) {
    341       int med = (beg + end) / 2;
    342       uptr g = (uptr)page_groups_[med];
    343       if (addr > g) {
    344         // 'g' points to the end of the group, so 'addr'
    345         // may not belong to page_groups_[med] or any previous group.
    346         beg = med + 1;
    347       } else {
    348         // 'addr' may belong to page_groups_[med] or a previous group.
    349         end = med;
    350       }
    351     }
    352     if (beg >= n)
    353       return 0;
    354     PageGroup *g = page_groups_[beg];
    355     CHECK(g);
    356     if (g->InRange(addr))
    357       return g;
    358     return 0;
    359   }
    360 
    361   // We have an address between two chunks, and we want to report just one.
    362   AsanChunk *ChooseChunk(uptr addr,
    363                          AsanChunk *left_chunk, AsanChunk *right_chunk) {
    364     // Prefer an allocated chunk or a chunk from quarantine.
    365     if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
    366         right_chunk->chunk_state != CHUNK_AVAILABLE)
    367       return right_chunk;
    368     if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
    369         left_chunk->chunk_state != CHUNK_AVAILABLE)
    370       return left_chunk;
    371     // Choose based on offset.
    372     sptr l_offset = 0, r_offset = 0;
    373     CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    374     CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    375     if (l_offset < r_offset)
    376       return left_chunk;
    377     return right_chunk;
    378   }
    379 
    380   AsanChunk *FindChunkByAddrUnlocked(uptr addr) {
    381     PageGroup *g = FindPageGroupUnlocked(addr);
    382     if (!g) return 0;
    383     CHECK(g->size_of_chunk);
    384     uptr offset_from_beg = addr - g->beg;
    385     uptr this_chunk_addr = g->beg +
    386         (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    387     CHECK(g->InRange(this_chunk_addr));
    388     AsanChunk *m = (AsanChunk*)this_chunk_addr;
    389     CHECK(m->chunk_state == CHUNK_ALLOCATED ||
    390           m->chunk_state == CHUNK_AVAILABLE ||
    391           m->chunk_state == CHUNK_QUARANTINE);
    392     sptr offset = 0;
    393     AsanChunkView m_view(m);
    394     if (m_view.AddrIsInside(addr, 1, &offset))
    395       return m;
    396 
    397     if (m_view.AddrIsAtRight(addr, 1, &offset)) {
    398       if (this_chunk_addr == g->last_chunk)  // rightmost chunk
    399         return m;
    400       uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
    401       CHECK(g->InRange(right_chunk_addr));
    402       return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    403     } else {
    404       CHECK(m_view.AddrIsAtLeft(addr, 1, &offset));
    405       if (this_chunk_addr == g->beg)  // leftmost chunk
    406         return m;
    407       uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
    408       CHECK(g->InRange(left_chunk_addr));
    409       return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    410     }
    411   }
    412 
    413   void QuarantinePop() {
    414     CHECK(quarantine_.size() > 0);
    415     AsanChunk *m = quarantine_.Pop();
    416     CHECK(m);
    417     // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);
    418 
    419     CHECK(m->chunk_state == CHUNK_QUARANTINE);
    420     m->chunk_state = CHUNK_AVAILABLE;
    421     PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    422     CHECK(m->alloc_tid >= 0);
    423     CHECK(m->free_tid >= 0);
    424 
    425     uptr size_class = m->SizeClass();
    426     m->next = free_lists_[size_class];
    427     free_lists_[size_class] = m;
    428 
    429     // Statistics.
    430     AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    431     thread_stats.real_frees++;
    432     thread_stats.really_freed += m->used_size;
    433     thread_stats.really_freed_redzones += m->Size() - m->used_size;
    434     thread_stats.really_freed_by_size[m->SizeClass()]++;
    435   }
    436 
    437   // Get a list of newly allocated chunks.
    438   AsanChunk *GetNewChunks(u8 size_class) {
    439     uptr size = SizeClassToSize(size_class);
    440     CHECK(IsPowerOfTwo(kMinMmapSize));
    441     CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    442     uptr mmap_size = Max(size, kMinMmapSize);
    443     uptr n_chunks = mmap_size / size;
    444     CHECK(n_chunks * size == mmap_size);
    445     uptr PageSize = GetPageSizeCached();
    446     if (size < PageSize) {
     447       // Size is small: reserve the last (poisoned) chunk slot for the PageGroup.
    448       n_chunks--;
    449     } else {
     450       // Size is large: map an extra (poisoned) page at the right to hold the PageGroup.
    451       mmap_size += PageSize;
    452     }
    453     CHECK(n_chunks > 0);
    454     u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
    455 
    456     // Statistics.
    457     AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    458     thread_stats.mmaps++;
    459     thread_stats.mmaped += mmap_size;
    460     thread_stats.mmaped_by_size[size_class] += n_chunks;
    461 
    462     AsanChunk *res = 0;
    463     for (uptr i = 0; i < n_chunks; i++) {
    464       AsanChunk *m = (AsanChunk*)(mem + i * size);
    465       m->chunk_state = CHUNK_AVAILABLE;
    466       m->size_class = size_class;
    467       m->next = res;
    468       res = m;
    469     }
    470     PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    471     // This memory is already poisoned, no need to poison it again.
    472     pg->beg = (uptr)mem;
    473     pg->end = pg->beg + mmap_size;
    474     pg->size_of_chunk = size;
    475     pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
    476     int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
    477     CHECK(idx < (int)ARRAY_SIZE(page_groups_));
    478     page_groups_[idx] = pg;
    479     return res;
    480   }
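        // Worked example, assuming the non-ASAN_LOW_MEMORY kMinMmapSize of 4M and a
        // 4K page size: for size_class 6 (64-byte chunks) this maps 4M, returns a
        // list of 65535 chunks, and stores the PageGroup descriptor in the last
        // (still poisoned) 64-byte slot at mem + 65535 * 64.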
    481 
    482   AsanChunk *free_lists_[kNumberOfSizeClasses];
    483   AsanChunkFifoList quarantine_;
    484   BlockingMutex mu_;
    485 
    486   PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
    487   atomic_uint32_t n_page_groups_;
    488   int n_sorted_page_groups_;
    489 };
    490 
    491 static MallocInfo malloc_info(LINKER_INITIALIZED);
    492 
    493 void AsanThreadLocalMallocStorage::CommitBack() {
    494   malloc_info.SwallowThreadLocalMallocStorage(this, true);
    495 }
    496 
    497 AsanChunkView FindHeapChunkByAddress(uptr address) {
    498   return AsanChunkView(malloc_info.FindChunkByAddr(address));
    499 }
    500 
    501 static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack,
    502                     AllocType alloc_type) {
    503   __asan_init();
    504   CHECK(stack);
    505   if (size == 0) {
    506     size = 1;  // TODO(kcc): do something smarter
    507   }
    508   CHECK(IsPowerOfTwo(alignment));
    509   uptr rounded_size = RoundUpTo(size, REDZONE);
    510   uptr needed_size = rounded_size + REDZONE;
    511   if (alignment > REDZONE) {
    512     needed_size += alignment;
    513   }
    514   CHECK(IsAligned(needed_size, REDZONE));
    515   if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    516     Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
    517            (void*)size);
    518     return 0;
    519   }
    520 
    521   u8 size_class = SizeToSizeClass(needed_size);
    522   uptr size_to_allocate = SizeClassToSize(size_class);
    523   CHECK(size_to_allocate >= kMinAllocSize);
    524   CHECK(size_to_allocate >= needed_size);
    525   CHECK(IsAligned(size_to_allocate, REDZONE));
    526 
    527   if (flags()->verbosity >= 3) {
    528     Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
    529          alignment, size, size_class, size_to_allocate);
    530   }
    531 
    532   AsanThread *t = asanThreadRegistry().GetCurrent();
    533   AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    534   // Statistics
    535   thread_stats.mallocs++;
    536   thread_stats.malloced += size;
    537   thread_stats.malloced_redzones += size_to_allocate - size;
    538   thread_stats.malloced_by_size[size_class]++;
    539 
    540   AsanChunk *m = 0;
    541   if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    542     // get directly from global storage.
    543     m = malloc_info.AllocateChunks(size_class, 1);
    544     thread_stats.malloc_large++;
    545   } else {
    546     // get from the thread-local storage.
    547     AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    548     if (!*fl) {
    549       uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
    550       *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
    551       thread_stats.malloc_small_slow++;
    552     }
    553     m = *fl;
    554     *fl = (*fl)->next;
    555   }
    556   CHECK(m);
    557   CHECK(m->chunk_state == CHUNK_AVAILABLE);
    558   m->chunk_state = CHUNK_ALLOCATED;
    559   m->alloc_type = alloc_type;
    560   m->next = 0;
    561   CHECK(m->Size() == size_to_allocate);
    562   uptr addr = (uptr)m + REDZONE;
    563   CHECK(addr <= (uptr)m->compressed_free_stack());
    564 
    565   if (alignment > REDZONE && (addr & (alignment - 1))) {
    566     addr = RoundUpTo(addr, alignment);
    567     CHECK((addr & (alignment - 1)) == 0);
    568     AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    569     p->chunk_state = CHUNK_MEMALIGN;
    570     p->used_size = (uptr)p - (uptr)m;
    571     m->alignment_log = Log2(alignment);
    572     CHECK(m->Beg() == addr);
    573   } else {
    574     m->alignment_log = Log2(REDZONE);
    575   }
    576   CHECK(m == PtrToChunk(addr));
    577   m->used_size = size;
    578   CHECK(m->Beg() == addr);
    579   m->alloc_tid = t ? t->tid() : 0;
    580   m->free_tid   = kInvalidTid;
    581   StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
    582                                 m->compressed_alloc_stack_size());
    583   PoisonShadow(addr, rounded_size, 0);
    584   if (size < rounded_size) {
    585     PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
    586                                   size & (REDZONE - 1));
    587   }
    588   if (size <= (uptr)(flags()->max_malloc_fill_size)) {
    589     REAL(memset)((void*)addr, 0, rounded_size);
    590   }
    591   return (u8*)addr;
    592 }
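        // To summarize the fast path above: small requests are served from the
        // current thread's per-size-class free list, which is refilled in batches of
        // kMaxSizeForThreadLocalFreeList / size_to_allocate chunks from the global
        // MallocInfo; large requests (or requests made before an AsanThread exists)
        // go to MallocInfo::AllocateChunks() directly.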
    593 
    594 static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) {
    595   if (!ptr) return;
    596   CHECK(stack);
    597 
    598   if (flags()->debug) {
    599     CHECK(malloc_info.FindPageGroup((uptr)ptr));
    600   }
    601 
    602   // Printf("Deallocate %p\n", ptr);
    603   AsanChunk *m = PtrToChunk((uptr)ptr);
    604 
    605   // Flip the chunk_state atomically to avoid race on double-free.
    606   u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
    607                                        memory_order_acq_rel);
    608 
    609   if (old_chunk_state == CHUNK_QUARANTINE) {
    610     ReportDoubleFree((uptr)ptr, stack);
    611   } else if (old_chunk_state != CHUNK_ALLOCATED) {
    612     ReportFreeNotMalloced((uptr)ptr, stack);
    613   }
    614   CHECK(old_chunk_state == CHUNK_ALLOCATED);
    615   if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    616     ReportAllocTypeMismatch((uptr)ptr, stack,
    617                             (AllocType)m->alloc_type, (AllocType)alloc_type);
    618   // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
    619   CHECK(REDZONE <= 16 || !m->next);
    620   CHECK(m->free_tid == kInvalidTid);
    621   CHECK(m->alloc_tid >= 0);
    622   AsanThread *t = asanThreadRegistry().GetCurrent();
    623   m->free_tid = t ? t->tid() : 0;
    624   StackTrace::CompressStack(stack, m->compressed_free_stack(),
    625                                 m->compressed_free_stack_size());
    626   uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
    627   PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);
    628 
    629   // Statistics.
    630   AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    631   thread_stats.frees++;
    632   thread_stats.freed += m->used_size;
    633   thread_stats.freed_by_size[m->SizeClass()]++;
    634 
    635   CHECK(m->chunk_state == CHUNK_QUARANTINE);
    636 
    637   if (t) {
    638     AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    639     ms->quarantine_.Push(m);
    640 
    641     if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
    642       malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    643     }
    644   } else {
    645     malloc_info.BypassThreadLocalQuarantine(m);
    646   }
    647 }
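        // To summarize the free path above: the state is flipped to CHUNK_QUARANTINE
        // with an atomic exchange (so double-free and free-of-non-malloced are
        // reported), the free stack is compressed into the chunk, the body is
        // poisoned with kAsanHeapFreeMagic, and the chunk sits in the thread-local
        // quarantine until that exceeds kMaxThreadLocalQuarantine and is swallowed
        // into the global quarantine bounded by flags()->quarantine_size.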
    648 
    649 static u8 *Reallocate(u8 *old_ptr, uptr new_size,
    650                            StackTrace *stack) {
    651   CHECK(old_ptr && new_size);
    652 
    653   // Statistics.
    654   AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    655   thread_stats.reallocs++;
    656   thread_stats.realloced += new_size;
    657 
    658   AsanChunk *m = PtrToChunk((uptr)old_ptr);
    659   CHECK(m->chunk_state == CHUNK_ALLOCATED);
    660   uptr old_size = m->used_size;
    661   uptr memcpy_size = Min(new_size, old_size);
    662   u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC);
    663   if (new_ptr) {
    664     CHECK(REAL(memcpy) != 0);
    665     REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    666     Deallocate(old_ptr, stack, FROM_MALLOC);
    667   }
    668   return new_ptr;
    669 }
    670 
    671 }  // namespace __asan
    672 
    673 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
    674 // Provide default (no-op) implementation of malloc hooks.
    675 extern "C" {
    676 SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
    677 void __asan_malloc_hook(void *ptr, uptr size) {
    678   (void)ptr;
    679   (void)size;
    680 }
    681 SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
    682 void __asan_free_hook(void *ptr) {
    683   (void)ptr;
    684 }
    685 }  // extern "C"
    686 #endif
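        // A minimal sketch (user-side code, shown only for illustration) of how a
        // client could override these weak hooks to observe allocations; 'size_t'
        // stands in for 'uptr' and the counter is made up:
        //
        //   static size_t g_malloc_count;  // not thread-safe; illustration only
        //   extern "C" void __asan_malloc_hook(void *ptr, size_t size) {
        //     (void)ptr; (void)size;
        //     ++g_malloc_count;
        //   }
        //   extern "C" void __asan_free_hook(void *ptr) { (void)ptr; }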
    687 
    688 namespace __asan {
    689 
    690 void InitializeAllocator() { }
    691 
    692 void PrintInternalAllocatorStats() {
    693 }
    694 
    695 SANITIZER_INTERFACE_ATTRIBUTE
    696 void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
    697                     AllocType alloc_type) {
    698   void *ptr = (void*)Allocate(alignment, size, stack, alloc_type);
    699   ASAN_MALLOC_HOOK(ptr, size);
    700   return ptr;
    701 }
    702 
    703 SANITIZER_INTERFACE_ATTRIBUTE
    704 void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
    705   ASAN_FREE_HOOK(ptr);
    706   Deallocate((u8*)ptr, stack, alloc_type);
    707 }
    708 
    709 SANITIZER_INTERFACE_ATTRIBUTE
    710 void *asan_malloc(uptr size, StackTrace *stack) {
    711   void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
    712   ASAN_MALLOC_HOOK(ptr, size);
    713   return ptr;
    714 }
    715 
    716 void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
    717   if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
    718   void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC);
    719   if (ptr)
    720     REAL(memset)(ptr, 0, nmemb * size);
    721   ASAN_MALLOC_HOOK(ptr, size);
    722   return ptr;
    723 }
    724 
    725 void *asan_realloc(void *p, uptr size, StackTrace *stack) {
    726   if (p == 0) {
    727     void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
    728     ASAN_MALLOC_HOOK(ptr, size);
    729     return ptr;
    730   } else if (size == 0) {
    731     ASAN_FREE_HOOK(p);
    732     Deallocate((u8*)p, stack, FROM_MALLOC);
    733     return 0;
    734   }
    735   return Reallocate((u8*)p, size, stack);
    736 }
    737 
    738 void *asan_valloc(uptr size, StackTrace *stack) {
    739   void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC);
    740   ASAN_MALLOC_HOOK(ptr, size);
    741   return ptr;
    742 }
    743 
    744 void *asan_pvalloc(uptr size, StackTrace *stack) {
    745   uptr PageSize = GetPageSizeCached();
    746   size = RoundUpTo(size, PageSize);
    747   if (size == 0) {
    748     // pvalloc(0) should allocate one page.
    749     size = PageSize;
    750   }
    751   void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC);
    752   ASAN_MALLOC_HOOK(ptr, size);
    753   return ptr;
    754 }
    755 
    756 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
    757                           StackTrace *stack) {
    758   void *ptr = Allocate(alignment, size, stack, FROM_MALLOC);
    759   CHECK(IsAligned((uptr)ptr, alignment));
    760   ASAN_MALLOC_HOOK(ptr, size);
    761   *memptr = ptr;
    762   return 0;
    763 }
    764 
    765 uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
    766   CHECK(stack);
    767   if (ptr == 0) return 0;
    768   uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
    769   if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    770     ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
    771   }
    772   return usable_size;
    773 }
    774 
    775 uptr asan_mz_size(const void *ptr) {
    776   return malloc_info.AllocationSize((uptr)ptr);
    777 }
    778 
    779 void asan_mz_force_lock() {
    780   malloc_info.ForceLock();
    781 }
    782 
    783 void asan_mz_force_unlock() {
    784   malloc_info.ForceUnlock();
    785 }
    786 
    787 }  // namespace __asan
    788 
    789 // ---------------------- Interface ---------------- {{{1
    790 using namespace __asan;  // NOLINT
    791 
    792 // ASan allocator doesn't reserve extra bytes, so normally we would
    793 // just return "size".
    794 uptr __asan_get_estimated_allocated_size(uptr size) {
    795   if (size == 0) return 1;
    796   return Min(size, kMaxAllowedMallocSize);
    797 }
    798 
    799 bool __asan_get_ownership(const void *p) {
    800   return malloc_info.AllocationSize((uptr)p) > 0;
    801 }
    802 
    803 uptr __asan_get_allocated_size(const void *p) {
    804   if (p == 0) return 0;
    805   uptr allocated_size = malloc_info.AllocationSize((uptr)p);
    806   // Die if p is not malloced or if it is already freed.
    807   if (allocated_size == 0) {
    808     GET_STACK_TRACE_FATAL_HERE;
    809     ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
    810   }
    811   return allocated_size;
    812 }
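        // Usage sketch for the two queries above (caller-side code, for illustration
        // only):
        //
        //   void *p = malloc(100);
        //   // __asan_get_ownership(p) == true
        //   // __asan_get_allocated_size(p) == 100  (the size passed to malloc)
        //   free(p);
        //   // Now __asan_get_ownership(p) == false, and __asan_get_allocated_size(p)
        //   // reports an error and dies, since the chunk is no longer CHUNK_ALLOCATED.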
    813 #endif  // ASAN_ALLOCATOR_VERSION
    814