//===-- msan_linux.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX

#include "msan.h"
#include "msan_thread.h"

#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <unwind.h>
#include <execinfo.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"

namespace __msan {

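// Prints the given range at verbosity level 1; the end address is shown
// inclusive.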
void ReportMapRange(const char *descr, uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    VPrintf(1, "%s : %p - %p\n", descr, beg, end);
  }
}

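// Returns true if the range [beg, beg + size) is not mapped by anything else
// yet, so it can safely be claimed for shadow or origin memory.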
static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
      return false;
    }
  }
  return true;
}

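// Reserves the range [beg, beg + size) as inaccessible so that any access to
// it faults. Used for regions that must never contain valid application data.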
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapNoAccess(beg, size, name);
    }
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end);
      return false;
    }
  }
  return true;
}

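// Sanity-checks kMemoryLayout: entries must be contiguous (each one starts
// where the previous one ends), sampled addresses within an entry must be
// classified as the declared type, and for APP regions the
// app<->shadow<->origin address transformations must be consistent.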
static void CheckMemoryLayoutSanity() {
  uptr prev_end = 0;
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    MappingDesc::Type type = kMemoryLayout[i].type;
    CHECK_LT(start, end);
    CHECK_EQ(prev_end, start);
    CHECK(addr_is_type(start, type));
    CHECK(addr_is_type((start + end) / 2, type));
    CHECK(addr_is_type(end - 1, type));
    if (type == MappingDesc::APP) {
      uptr addr = start;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = (start + end) / 2;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = end - 1;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
    }
    prev_end = end;
  }
}

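// Maps the shadow (and, if init_origins is set, origin) regions from
// kMemoryLayout and reserves the remaining non-APP regions as inaccessible.
// Returns false if any required range cannot be claimed, or if the executable
// itself lies outside the application range (non-PIE build).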
bool InitShadow(bool init_origins) {
  // Let the user know the mapping parameters first.
  VPrintf(1, "__msan_init %p\n", &__msan_init);
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

  if (!MEM_IS_APP(&__msan_init)) {
    Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
           (uptr)&__msan_init);
    return false;
  }

  const uptr maxVirtualAddress = GetMaxVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect)
      CHECK(type == MappingDesc::APP);
    if (map) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if ((uptr)MmapFixedNoReserve(start, size, kMemoryLayout[i].name) != start)
        return false;
      if (common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}

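// atexit() callback: optionally prints statistics, and if any MSan reports
// were issued, exits with common_flags()->exitcode (when it is non-zero).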
static void MsanAtExit(void) {
  if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
    ReportStats();
  if (msan_report_count > 0) {
    ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(MsanAtExit);
}

// ---------------------- TSD ---------------- {{{1

static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

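// Creates the pthread TSD key whose destructor is run when a thread exits.
// May only be called once.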
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}

static THREADLOCAL MsanThread* msan_current_thread;

MsanThread *GetCurrentThread() {
  return msan_current_thread;
}

void SetCurrentThread(MsanThread *t) {
  // Make sure we do not reset the current MsanThread.
  CHECK_EQ(0, msan_current_thread);
  msan_current_thread = t;
  // Make sure that MsanTSDDtor gets called at the end.
  CHECK(tsd_key_inited);
  pthread_setspecific(tsd_key, (void *)t);
}

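// TSD destructor. It re-arms itself for several iterations so that the
// per-thread MSan state stays valid while other TSD destructors may still
// run; on the final iteration the cached thread pointer is cleared and the
// MsanThread is destroyed.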
void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread*)tsd;
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  msan_current_thread = nullptr;
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}

} // namespace __msan

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX