//===-- esan_shadow.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of EfficiencySanitizer, a family of performance tuners.
//
// Shadow memory mappings for the esan run-time.
//===----------------------------------------------------------------------===//

#ifndef ESAN_SHADOW_H
#define ESAN_SHADOW_H

#include <sanitizer_common/sanitizer_platform.h>

#if SANITIZER_WORDSIZE != 64
#error Only 64-bit is supported
#endif

namespace __esan {

#if SANITIZER_LINUX && defined(__x86_64__)
// Linux x86_64
//
// Application memory falls into these 5 regions (ignoring the corner case
// of PIE with a non-zero PT_LOAD base):
//
// [0x00000000'00000000, 0x00000100'00000000) non-PIE + heap
// [0x00005500'00000000, 0x00005700'00000000) PIE
// [0x00007e00'00000000, 0x00007fff'ff600000) libraries + stack, part 1
// [0x00007fff'ff601000, 0x00008000'00000000) libraries + stack, part 2
// [0xffffffff'ff600000, 0xffffffff'ff601000) vsyscall
//
// Although we can ignore the vsyscall for the most part as there are few data
// references there (other sanitizers ignore it), we enforce a gap inside the
// library region to distinguish the vsyscall's shadow, considering this gap to
// be an invalid app region.
// We disallow application memory outside of those 5 regions.
// Our regions assume that the stack rlimit is less than a terabyte (otherwise
// the Linux kernel's default mmap region drops below 0x7e00'), which we enforce
// at init time (we can support larger and unlimited sizes for shadow
// scaledowns, but it is difficult for 1:1 mappings).
//
// Our shadow memory is scaled from a 1:1 mapping and supports a scale
// specified at library initialization time that can be any power-of-2
// scaledown (1x, 2x, 4x, 8x, 16x, etc.).
//
// We model our shadow memory after Umbra, a library used by the Dr. Memory
// tool: https://github.com/DynamoRIO/drmemory/blob/master/umbra/umbra_x64.c.
// We use Umbra's scheme as it was designed to support different
// offsets, it supports two different shadow mappings (which we may want to
// use for future tools), and it ensures that the shadow of a shadow will
// not overlap either shadow memory or application memory.
//
// This formula translates from application memory to shadow memory:
//
//   shadow(app) = ((app & 0x00000fff'ffffffff) + offset) >> scale
//
// Where the offset for 1:1 is 0x00001300'00000000.  For other scales, the
// offset is shifted left by the scale, except for scales of 1 and 2 where
// it must be tweaked in order to pass the double-shadow test
// (see the "shadow(shadow)" comments below):
//   scale == 0: 0x00001300'00000000
//   scale == 1: 0x00002200'00000000
//   scale == 2: 0x00004400'00000000
//   scale >= 3: (0x00001300'00000000 << scale)
//
// Do not pass in the open-ended end value to the formula as it will fail.
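//
// For example (purely illustrative; the address is hypothetical), with
// scale == 0 a PIE address such as 0x00005580'00001000 maps to
//   (0x00005580'00001000 & 0x00000fff'ffffffff) + 0x00001300'00000000
//     = 0x00000580'00001000 + 0x00001300'00000000
//     = 0x00001880'00001000
// which lands inside the second shadow region listed below.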
//
// The resulting shadow memory regions for a 0 scaling are:
//
// [0x00001300'00000000, 0x00001400'00000000)
// [0x00001800'00000000, 0x00001a00'00000000)
// [0x00002100'00000000, 0x000022ff'ff600000)
// [0x000022ff'ff601000, 0x00002300'00000000)
// [0x000022ff'ff600000, 0x000022ff'ff601000)
//
// We also want to ensure that a wild access by the application into the shadow
// regions will not corrupt our own shadow memory.  shadow(shadow) ends up
// disjoint from shadow(app):
//
// [0x00001600'00000000, 0x00001700'00000000)
// [0x00001b00'00000000, 0x00001d00'00000000)
// [0x00001400'00000000, 0x000015ff'ff600000)
// [0x000015ff'ff601000, 0x00001600'00000000)
// [0x000015ff'ff600000, 0x000015ff'ff601000)
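//
// For instance, with scale == 0 the start of the first shadow region maps to
//   (0x00001300'00000000 & 0x00000fff'ffffffff) + 0x00001300'00000000
//     = 0x00001600'00000000
// which is the start of the first shadow(shadow) range above and lies outside
// every application and shadow region.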

struct ApplicationRegion {
  uptr Start;
  uptr End;
  bool ShadowMergedWithPrev;
};

static const struct ApplicationRegion AppRegions[] = {
  {0x0000000000000000u,   0x0000010000000000u, false},
  {0x0000550000000000u,   0x0000570000000000u, false},
  // We make one shadow mapping to hold the shadow regions for all 3 of these
  // app regions, as the mappings interleave, and the gap between the 3rd and
  // 4th regions scales down to below a page.
  {0x00007e0000000000u,   0x00007fffff600000u, false},
  {0x00007fffff601000u,   0x0000800000000000u, true},
  {0xffffffffff600000u,   0xffffffffff601000u, true},
};
static const u32 NumAppRegions = sizeof(AppRegions)/sizeof(AppRegions[0]);

// See the comment above: we do not currently support a stack size rlimit
// equal to or larger than 1TB.
static const uptr MaxStackSize = (1ULL << 40) - 4096;
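
// Illustrative helper only (not part of the original interface): the
// init-time check described above amounts to rejecting any stack rlimit at
// or above MaxStackSize.  The caller would pass in the soft RLIMIT_STACK
// value obtained via getrlimit(); an unlimited rlimit (RLIM_INFINITY, which
// is an all-ones value on typical Linux configurations) is rejected as well.
static inline bool isStackSizeSupported(uptr StackRlimit) {
  return StackRlimit <= MaxStackSize;
}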

class ShadowMapping {
public:
  static const uptr Mask = 0x00000fffffffffffu;
  // The scale and offset vary by tool.
  uptr Scale;
  uptr Offset;
  void initialize(uptr ShadowScale) {
    static const uptr OffsetArray[3] = {
        0x0000130000000000u,
        0x0000220000000000u,
        0x0000440000000000u,
    };
    Scale = ShadowScale;
    if (Scale <= 2)
      Offset = OffsetArray[Scale];
    else
      Offset = OffsetArray[0] << Scale;
  }
};
extern ShadowMapping Mapping;
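// Usage sketch (illustrative; the 8:1 scaledown is hypothetical): a tool
// would call
//   Mapping.initialize(3);
// at startup, leaving Mapping.Scale == 3 and
// Mapping.Offset == 0x0000980000000000u (i.e., 0x0000130000000000u << 3).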
#else
// We'll want to use templatized functions over the ShadowMapping once
// we support more platforms.
#error Platform not supported
#endif

static inline bool getAppRegion(u32 i, uptr *Start, uptr *End) {
  if (i >= NumAppRegions)
    return false;
  *Start = AppRegions[i].Start;
  *End = AppRegions[i].End;
  return true;
}

ALWAYS_INLINE
bool isAppMem(uptr Mem) {
  for (u32 i = 0; i < NumAppRegions; ++i) {
    if (Mem >= AppRegions[i].Start && Mem < AppRegions[i].End)
      return true;
  }
  return false;
}

ALWAYS_INLINE
uptr appToShadow(uptr App) {
  return (((App & ShadowMapping::Mask) + Mapping.Offset) >> Mapping.Scale);
}
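
// Continuing the illustrative 8:1 example above (all values hypothetical):
//   uptr Shadow = appToShadow(0x00007e0000001000u);
//   // == (0x00000e0000001000 + 0x0000980000000000) >> 3
//   // == 0x000014c000000200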

static inline bool getShadowRegion(u32 i, uptr *Start, uptr *End) {
  if (i >= NumAppRegions)
    return false;
  u32 UnmergedShadowCount = 0;
  u32 AppIdx;
  for (AppIdx = 0; AppIdx < NumAppRegions; ++AppIdx) {
    if (!AppRegions[AppIdx].ShadowMergedWithPrev) {
      if (UnmergedShadowCount == i)
        break;
      UnmergedShadowCount++;
    }
  }
  if (AppIdx >= NumAppRegions || UnmergedShadowCount != i)
    return false;
  *Start = appToShadow(AppRegions[AppIdx].Start);
  // The formula fails for the end itself.
  *End = appToShadow(AppRegions[AppIdx].End - 1) + 1;
  // Merge with adjacent shadow regions:
  for (++AppIdx; AppIdx < NumAppRegions; ++AppIdx) {
    if (!AppRegions[AppIdx].ShadowMergedWithPrev)
      break;
    *Start = Min(*Start, appToShadow(AppRegions[AppIdx].Start));
    *End = Max(*End, appToShadow(AppRegions[AppIdx].End - 1) + 1);
  }
  return true;
}
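
// Illustrative sketch only (not part of the original interface): how init
// code might walk the merged shadow regions, e.g. to reserve each one with
// sanitizer_common's MmapFixedNoReserve.  With the Linux x86_64 mapping above
// this visits exactly 3 merged regions.
static inline void forEachShadowRegion(void (*Callback)(uptr Start,
                                                        uptr End)) {
  uptr Start, End;
  for (u32 i = 0; getShadowRegion(i, &Start, &End); ++i)
    Callback(Start, End);
}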

ALWAYS_INLINE
bool isShadowMem(uptr Mem) {
  // We assume this is not used on any critical performance path and so there's
  // no need to hardcode the mapping results.
  for (uptr i = 0; i < NumAppRegions; ++i) {
    if (Mem >= appToShadow(AppRegions[i].Start) &&
        Mem < appToShadow(AppRegions[i].End - 1) + 1)
      return true;
  }
  return false;
}
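
// A debug-style sanity sketch (hypothetical; not part of the original
// interface), mirroring the properties described at the top of this file:
// after Mapping.initialize(), shadow memory must not alias application
// memory, and the shadow of a shadow address must not land back in shadow
// (or application) memory either.
static inline bool verifyDoubleShadowDisjoint() {
  for (u32 i = 0; i < NumAppRegions; ++i) {
    uptr Shadow[2] = {appToShadow(AppRegions[i].Start),
                      appToShadow(AppRegions[i].End - 1)};
    for (u32 j = 0; j < 2; ++j) {
      // Shadow memory must not overlap application memory.
      if (isAppMem(Shadow[j]))
        return false;
      // shadow(shadow) must not overlap shadow or application memory.
      uptr Shadow2 = appToShadow(Shadow[j]);
      if (isShadowMem(Shadow2) || isAppMem(Shadow2))
        return false;
    }
  }
  return true;
}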

} // namespace __esan

#endif /* ESAN_SHADOW_H */