// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Platform-specific code for MacOS goes here. For the POSIX-compatible
// parts, the implementation is in platform-posix.cc.

#include <dlfcn.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <sys/mman.h>
#include <unistd.h>

#include <AvailabilityMacros.h>

#include <errno.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/types.h>

#include <cmath>

#undef MAP_TYPE

#include "src/base/macros.h"
#include "src/base/platform/platform.h"


namespace v8 {
namespace base {

// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the
// user-defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;

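// Allocates |requested| bytes rounded up to the page size as anonymous,
// private memory at a randomized hint address. Returns NULL on failure;
// otherwise reports the actual reservation size through |allocated|.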
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(OS::GetRandomMmapAddr(),
                     msize,
                     prot,
                     MAP_PRIVATE | MAP_ANON,
                     kMmapFd,
                     kMmapFdOffset);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

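// Walks the images currently loaded by dyld and records the address range of
// each image's __TEXT,__text section, adjusted by the image's ASLR slide.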
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
                                          start + size, slide));
  }
  return result;
}

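// Nothing to do on macOS. On some platforms this hook notifies external
// profilers that a code-moving garbage collection has occurred.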
void OS::SignalCodeMovingGC() {
}

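// Returns the abbreviated name of the local timezone in effect at |time|
// (milliseconds since the epoch), or an empty string on failure.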
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
  struct tm tm;
  struct tm* t = localtime_r(&tv, &tm);
  if (NULL == t) return "";
  return t->tm_zone;
}

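// Returns the local timezone offset from UTC in milliseconds, excluding any
// daylight saving adjustment that is currently in effect.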
double OS::LocalTimeOffset(TimezoneCache* cache) {
  time_t tv = time(NULL);
  struct tm tm;
  struct tm* t = localtime_r(&tv, &tm);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }

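// Reserves an aligned block by over-reserving |size| + |alignment| bytes of
// inaccessible address space and then trimming the unaligned prefix and the
// surplus suffix, leaving an aligned reservation of the rounded-up size.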
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

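// Marks one commit page at |address| as inaccessible so it acts as a guard
// page.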
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

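// Reserves |size| bytes of address space without committing it; the pages are
// mapped PROT_NONE, so any access faults until CommitRegion is called.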
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

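// Commits a previously reserved range by remapping it with MAP_FIXED and
// read/write (and optionally execute) protection.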
bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}

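// Uncommits a range by remapping it back to an inaccessible PROT_NONE
// reservation; any previous contents are discarded.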
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  return mmap(address,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

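// Unmaps only [free_start, free_start + free_size); |base| and |size|
// describe the enclosing reservation and are not needed on this platform.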
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

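// Anonymous mmap on macOS backs committed pages lazily, on first access, so
// committing is cheap until the memory is actually touched.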
bool VirtualMemory::HasLazyCommits() { return true; }

}  // namespace base
}  // namespace v8