//===-- asan_poisoning.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1;
  CHECK(REAL(memset) != 0);
  REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
}
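
// Illustrative sketch (not part of the RTL logic above): assuming the default
// SHADOW_GRANULARITY of 8, each shadow byte covers 8 application bytes, so a
// call such as
//
//   PoisonShadow(p, 32, 0xfa);  // p must be 8-aligned, as the CHECKs require
//
// writes the value 0xfa into the 32 / 8 == 4 shadow bytes starting at
// MemToShadow(p). The byte value 0xfa is only an example here.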

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  u8 *shadow = (u8*)MemToShadow(addr);
  for (uptr i = 0; i < redzone_size;
       i += SHADOW_GRANULARITY, shadow++) {
    if (i + SHADOW_GRANULARITY <= size) {
      *shadow = 0;  // fully addressable
    } else if (i >= size) {
      *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value;  // unaddressable
    } else {
      *shadow = size - i;  // first size-i bytes are addressable
    }
  }
}
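
// Worked example (illustrative only, assuming SHADOW_GRANULARITY == 8): for an
// 8-aligned chunk with size == 13 and redzone_size == 32, the loop above
// produces the four shadow bytes {0, 5, value, value}: the first granule is
// fully addressable, in the second only the first 13 - 8 == 5 bytes are
// addressable, and the remaining two granules are fully unaddressable.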

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;  // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
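
// For illustration (assuming SHADOW_GRANULARITY == 8): constructing
// ShadowSegmentEndpoint(0x100005) yields chunk == (u8*)MemToShadow(0x100005),
// offset == 5, and value equal to whatever that shadow byte currently holds.
// The address 0x100005 is an arbitrary example, not a real mapping constant.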

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the ASan shadow
// mapping invariant is preserved (see the detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
//
// * If the user asks to poison the region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * If the user asks to unpoison the region [left, right), the program
//   unpoisons at most [AlignDown(left), right).
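//
// A concrete illustration (assuming the default SHADOW_GRANULARITY of 8 and an
// 8-aligned pointer p; the numbers are examples only):
//   __asan_poison_memory_region(p, 13)        poisons at least [p, p + 8)
//   __asan_unpoison_memory_region(p + 3, 10)  unpoisons at most [p, p + 13)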
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  if (flags()->verbosity >= 1) {
    Printf("Trying to poison memory region [%p, %p)\n",
           (void*)beg_addr, (void*)end_addr);
  }
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We can only poison memory if the byte at offset end.offset is already
    // unaddressable. No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    // Mark the bytes from beg.offset onward as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison the last chunk only if the byte at offset end.offset is already
  // unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}
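
// Worked example of the single-chunk case above (illustrative, assuming
// SHADOW_GRANULARITY == 8): if the shadow byte for an 8-aligned granule at p
// holds 4 (only [p, p + 4) addressable) and the user poisons [p + 2, p + 6),
// then beg.offset == 2, end.offset == 6 and value == 4, so the check
// 0 < value <= end.offset passes and the shadow byte becomes Min(4, 2) == 2,
// leaving only [p, p + 2) addressable. If instead the shadow byte held 7, the
// byte at end.offset would still be addressable and nothing would change.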

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  if (flags()->verbosity >= 1) {
    Printf("Trying to unpoison memory region [%p, %p)\n",
           (void*)beg_addr, (void*)end_addr);
  }
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We unpoison memory bytes up to end.offset if they are not already
    // unpoisoned.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

bool __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}
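
// Usage sketch (hypothetical user code, not part of this file; it assumes the
// public declarations from <sanitizer/asan_interface.h> and the default
// runtime flags, in particular allow_user_poisoning=1):
//
//   char buf[16];
//   __asan_poison_memory_region(buf, 16);    // accesses to buf now report
//                                            // use-after-poison errors
//   assert(__asan_address_is_poisoned(buf + 8));
//   __asan_unpoison_memory_region(buf, 16);  // buf is fully accessible again
//   assert(!__asan_address_is_poisoned(buf));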

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg)) return beg;
  if (!AddrIsInMem(end)) return end;
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
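
// For example (illustrative only): if the region [beg, beg + 100) has its
// first poisoned byte at beg + 50, the fast shadow check above fails and the
// slow byte-by-byte loop returns beg + 50; if no byte in the region is
// poisoned, the function returns 0.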

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}
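
// Worked example (illustrative only, assuming SHADOW_GRANULARITY == 8): for an
// 8-aligned addr and size == 13, aligned_size == 8, so the first granule is
// poisoned (or unpoisoned) wholesale by PoisonShadow, and end_offset == 5
// governs the second granule: when poisoning, it is marked use-after-scope
// only if it currently has between 1 and 5 addressable bytes; when
// unpoisoning, its shadow becomes Max(end_value, 5), making at least
// [addr + 8, addr + 13) addressable.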

void __asan_poison_stack_memory(uptr addr, uptr size) {
  if (flags()->verbosity > 0)
    Report("poisoning: %p %zx\n", (void*)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  if (flags()->verbosity > 0)
    Report("unpoisoning: %p %zx\n", (void*)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}