Home | History | Annotate | Download | only in src
      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 // Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
// compatible parts the implementation is in platform-posix.cc.
     30 
     31 #include <pthread.h>
     32 #include <semaphore.h>
     33 #include <signal.h>
     34 #include <sys/time.h>
     35 #include <sys/resource.h>
     36 #include <sys/syscall.h>
     37 #include <sys/types.h>
     38 #include <stdlib.h>
     39 
     40 #include <sys/types.h>  // mmap & munmap
     41 #include <sys/mman.h>   // mmap & munmap
     42 #include <sys/stat.h>   // open
     43 #include <fcntl.h>      // open
     44 #include <unistd.h>     // sysconf
     45 #include <execinfo.h>   // backtrace, backtrace_symbols
     46 #include <strings.h>    // index
     47 #include <errno.h>
     48 #include <stdarg.h>
     49 
     50 #undef MAP_TYPE
     51 
     52 #include "v8.h"
     53 
     54 #include "platform-posix.h"
     55 #include "platform.h"
     56 #include "v8threads.h"
     57 #include "vm-state-inl.h"
     58 
     59 
     60 namespace v8 {
     61 namespace internal {
     62 
// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
// Sentinel used by Thread::PlatformData to mean "no thread started yet".
static const pthread_t kNoThread = (pthread_t) 0;
     66 
     67 
     68 double ceiling(double x) {
     69   return ceil(x);
     70 }
     71 
     72 
     73 static Mutex* limit_mutex = NULL;
     74 
     75 
// Returns a randomized hint address to pass to mmap (poor-man's ASLR for
// V8's mappings), or NULL (let the kernel pick) when no isolate is current.
static void* GetRandomMmapAddr() {
  Isolate* isolate = Isolate::UncheckedCurrent();
  // Note that the current isolate isn't set up in a call path via
  // CpuFeatures::Probe. We don't care about randomization in this case because
  // the code page is immediately freed.
  if (isolate != NULL) {
#ifdef V8_TARGET_ARCH_X64
    uint64_t rnd1 = V8::RandomPrivate(isolate);
    uint64_t rnd2 = V8::RandomPrivate(isolate);
    // Two 32-bit random draws combined into one 64-bit hint.
    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
    // the hint address to 46 bits to give the kernel a fighting chance of
    // fulfilling our placement request.
    raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
    uint32_t raw_addr = V8::RandomPrivate(isolate);
    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
    raw_addr &= 0x3ffff000;  // Page-align and keep within a 1GB window.
    raw_addr += 0x20000000;
#endif
    return reinterpret_cast<void*>(raw_addr);
  }
  return NULL;
}
    101 
    102 
// One-time process setup: seeds the libc RNG and creates the mutex that
// protects the allocated-space limits.
void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  // NOTE(review): getpid() << 16 is computed in int width before widening —
  // presumably fine for seeding, but confirm no pid truncation is intended.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}
    109 
    110 
// Runs after CPU feature probing has completed.
void OS::PostSetUp() {
  // Math functions depend on CPU features therefore they are initialized after
  // CPU.
  MathSetup();
}
    116 
    117 
// This platform makes no guarantees about available CPU features beyond
// what runtime probing detects, so no bits are implied.
uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;
}
    121 
    122 
// Stack frame alignment (in bytes) required for calls into C++ code.
int OS::ActivationFrameAlignment() {
  // With gcc 4.4 the tree vectorization optimizer can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
}
    128 
    129 
// Stores 'value' to '*ptr' with release semantics.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  // Empty asm with a "memory" clobber: a compiler barrier preventing
  // reordering of preceding memory operations past the store below.
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
  *ptr = value;
}
    135 
    136 
// Returns the local timezone abbreviation for the given time (ms since
// epoch), or "" if the time is NaN or cannot be converted.
// NOTE(review): the returned pointer refers to libc's static tm buffer and
// may be invalidated by a later localtime() call — callers presumably copy it.
const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}
    144 
    145 
    146 double OS::LocalTimeOffset() {
    147   time_t tv = time(NULL);
    148   struct tm* t = localtime(&tv);
    149   // tm_gmtoff includes any daylight savings offset, so subtract it.
    150   return static_cast<double>(t->tm_gmtoff * msPerSecond -
    151                              (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
    152 }
    153 
    154 
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
    162 
    163 
// Widens the [lowest, highest) envelope to include [address, address+size).
// Called under no lock by allocators; takes limit_mutex itself.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
    173 
    174 
    175 bool OS::IsOutsideAllocatedSpace(void* address) {
    176   return address < lowest_ever_allocated || address >= highest_ever_allocated;
    177 }
    178 
    179 
// Minimum alignment/granularity of OS::Allocate: the system page size.
size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}
    183 
    184 
// Allocates 'requested' bytes (rounded up to page granularity) of
// anonymous memory, optionally executable. Returns NULL on failure and
// stores the actual size in *allocated on success.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = GetRandomMmapAddr();  // Hint only; kernel may place elsewhere.
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
    201 
    202 
// Unmaps memory previously obtained from OS::Allocate (or mmap directly).
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);  // Only checked in debug builds via the ASSERT below.
  ASSERT(result == 0);
}
    209 
    210 
// Sleeps the calling thread for the given number of milliseconds.
// NOTE(review): usleep() with an argument >= 1 second is unspecified by
// POSIX on some systems — confirm callers only pass sub-second intervals.
void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}
    215 
    216 
// Terminates the process abnormally (raises SIGABRT).
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
    221 
    222 
// Traps into an attached debugger. x86/x64 only: emits the int3 breakpoint
// instruction directly.
void OS::DebugBreak() {
  asm("int $3");
}
    226 
    227 
// Memory-mapped file backed by mmap over a stdio FILE*. Takes ownership of
// both the FILE* and the mapping; the destructor unmaps and closes them.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed in the destructor.
  void* memory_;  // Owned mapping; unmapped in the destructor.
  int size_;      // Size of the mapping in bytes.
};
    240 
    241 
    242 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
    243   FILE* file = fopen(name, "r+");
    244   if (file == NULL) return NULL;
    245 
    246   fseek(file, 0, SEEK_END);
    247   int size = ftell(file);
    248 
    249   void* memory =
    250       mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
    251   return new PosixMemoryMappedFile(file, memory, size);
    252 }
    253 
    254 
    255 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    256     void* initial) {
    257   FILE* file = fopen(name, "w+");
    258   if (file == NULL) return NULL;
    259   int result = fwrite(initial, size, 1, file);
    260   if (result < 1) {
    261     fclose(file);
    262     return NULL;
    263   }
    264   void* memory =
    265       mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
    266   return new PosixMemoryMappedFile(file, memory, size);
    267 }
    268 
    269 
// Releases the mapping (if any) and closes the backing file.
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}
    274 
    275 
// Parses /proc/self/maps and logs a SharedLibraryEvent for every read-only
// executable mapping, so the profiler can attribute ticks to libraries.
void OS::LogSharedLibraryAddresses() {
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  // NOTE(review): malloc result is not NULL-checked here — confirm
  // FILENAME_MAX+1 allocations are assumed infallible in this codebase.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  i::Isolate* isolate = ISOLATE;
  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
}
    334 
    335 
// Name of the dummy file mapped and immediately unmapped in
// OS::SignalCodeMovingGC() so ll_prof.py can spot GC markers in the
// kernel's mmap log.
static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
    337 
    338 
    339 void OS::SignalCodeMovingGC() {
    340   // Support for ll_prof.py.
    341   //
    342   // The Linux profiler built into the kernel logs all mmap's with
    343   // PROT_EXEC so that analysis tools can properly attribute ticks. We
    344   // do a mmap with a name known by ll_prof.py and immediately munmap
    345   // it. This injects a GC marker into the stream of events generated
    346   // by the kernel and allows us to synchronize V8 code log and the
    347   // kernel log.
    348   int size = sysconf(_SC_PAGESIZE);
    349   FILE* f = fopen(kGCFakeMmap, "w+");
    350   void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
    351                     fileno(f), 0);
    352   ASSERT(addr != MAP_FAILED);
    353   OS::Free(addr, size);
    354   fclose(f);
    355 }
    356 
    357 
// Captures the current native stack into 'frames' using backtrace(3).
// Returns the number of frames captured, or kStackWalkError on failure.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  // backtrace_symbols mallocs one block holding all strings; freed below.
  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
}
    385 
    386 
// Constants used for mmap: anonymous mappings use fd -1 and offset 0.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
    390 
// Creates an empty (unreserved) VirtualMemory; IsReserved() is false.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
    392 
// Reserves (but does not commit) 'size' bytes of address space.
// On failure address_ is NULL and IsReserved() is false.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}
    397 
    398 
// Reserves 'size' bytes aligned to 'alignment' by over-reserving
// size + alignment, then trimming the unaligned prefix and excess suffix.
// On failure address_ stays NULL.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE reservation: no accessible pages until Commit() is called.
  void* reservation = mmap(GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block remains.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
    437 
    438 
// Releases the reserved region, if any.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);  // Silence unused-variable warning in release builds.
  }
}
    446 
    447 
// True when this object currently owns a reserved region.
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
    451 
    452 
// Forgets the region without unmapping it (ownership transferred elsewhere).
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
    457 
    458 
// Makes [address, address+size) accessible (read/write, optionally exec).
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
    462 
    463 
// Returns [address, address+size) to the reserved-but-inaccessible state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
    467 
    468 
// Turns one page at 'address' into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;  // OS::Guard reports no failure; always succeeds here.
}
    473 
    474 
    475 void* VirtualMemory::ReserveRegion(size_t size) {
    476   void* result = mmap(GetRandomMmapAddr(),
    477                       size,
    478                       PROT_NONE,
    479                       MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
    480                       kMmapFd,
    481                       kMmapFdOffset);
    482 
    483   if (result == MAP_FAILED) return NULL;
    484 
    485   return result;
    486 }
    487 
    488 
// Commits [base, base+size) by remapping it with read/write (and optionally
// exec) protection over the existing reservation. Returns false on failure.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  // MAP_FIXED replaces the PROT_NONE reservation in place.
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}
    503 
    504 
// Uncommits [base, base+size) by remapping it back to an inaccessible
// PROT_NONE reservation. Returns true on success.
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}
    513 
    514 
    515 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
    516   return munmap(base, size) == 0;
    517 }
    518 
    519 
// Per-thread platform state: just the pthread handle, kNoThread until
// the thread has actually started.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};
    526 
// Constructs a not-yet-started thread with the given name and stack size.
Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}
    532 
    533 
// Frees platform data. Does not join the thread; callers must Join() first.
Thread::~Thread() {
  delete data_;
}
    537 
    538 
// pthread entry trampoline: records the thread handle, optionally names the
// thread, and invokes Thread::Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
#ifdef PR_SET_NAME
  // NOTE(review): PR_SET_NAME/prctl come from <sys/prctl.h>, which is not
  // included here — presumably this branch only compiles where another
  // header pulls it in; confirm on the target platforms.
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
#endif
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
    554 
    555 
// Copies 'name' into the fixed-size name_ buffer, always NUL-terminating
// (strncpy alone does not terminate when the source fills the buffer).
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
    560 
    561 
    562 void Thread::Start() {
    563   pthread_attr_t* attr_ptr = NULL;
    564   pthread_attr_t attr;
    565   if (stack_size_ > 0) {
    566     pthread_attr_init(&attr);
    567     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    568     attr_ptr = &attr;
    569   }
    570   pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
    571   ASSERT(data_->thread_ != kNoThread);
    572 }
    573 
    574 
// Blocks until the thread finishes running.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
    578 
    579 
// Allocates a new thread-local-storage key (no destructor callback).
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);  // Only checked in debug builds.
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}
    587 
    588 
// Releases a key created by CreateThreadLocalKey().
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);  // Only checked in debug builds.
  ASSERT(result == 0);
}
    595 
    596 
// Reads the calling thread's value for 'key' (NULL if never set).
void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}
    601 
    602 
// Sets the calling thread's value for 'key'.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
    607 
    608 
// Hints the scheduler to run another ready thread.
void Thread::YieldCPU() {
  sched_yield();
}
    612 
    613 
    614 class OpenBSDMutex : public Mutex {
    615  public:
    616   OpenBSDMutex() {
    617     pthread_mutexattr_t attrs;
    618     int result = pthread_mutexattr_init(&attrs);
    619     ASSERT(result == 0);
    620     result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    621     ASSERT(result == 0);
    622     result = pthread_mutex_init(&mutex_, &attrs);
    623     ASSERT(result == 0);
    624     USE(result);
    625   }
    626 
    627   virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
    628 
    629   virtual int Lock() {
    630     int result = pthread_mutex_lock(&mutex_);
    631     return result;
    632   }
    633 
    634   virtual int Unlock() {
    635     int result = pthread_mutex_unlock(&mutex_);
    636     return result;
    637   }
    638 
    639   virtual bool TryLock() {
    640     int result = pthread_mutex_trylock(&mutex_);
    641     // Return false if the lock is busy and locking failed.
    642     if (result == EBUSY) {
    643       return false;
    644     }
    645     ASSERT(result == 0);  // Verify no other errors.
    646     return true;
    647   }
    648 
    649  private:
    650   pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
    651 };
    652 
    653 
// Factory for the platform mutex; caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new OpenBSDMutex();
}
    657 
    658 
// POSIX unnamed-semaphore implementation of the v8 Semaphore interface.
class OpenBSDSemaphore : public Semaphore {
 public:
  explicit OpenBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();              // Blocks until the semaphore is signaled.
  virtual bool Wait(int timeout);   // Waits at most 'timeout' microseconds.
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};
    670 
    671 
// Blocks until the semaphore can be decremented, retrying on EINTR.
void OpenBSDSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
    679 
    680 
    681 #ifndef TIMEVAL_TO_TIMESPEC
    682 #define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    683     (ts)->tv_sec = (tv)->tv_sec;                                    \
    684     (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
    685 } while (false)
    686 #endif
    687 
    688 
    689 bool OpenBSDSemaphore::Wait(int timeout) {
    690   const long kOneSecondMicros = 1000000;  // NOLINT
    691 
    692   // Split timeout into second and nanosecond parts.
    693   struct timeval delta;
    694   delta.tv_usec = timeout % kOneSecondMicros;
    695   delta.tv_sec = timeout / kOneSecondMicros;
    696 
    697   struct timeval current_time;
    698   // Get the current time.
    699   if (gettimeofday(&current_time, NULL) == -1) {
    700     return false;
    701   }
    702 
    703   // Calculate time for end of timeout.
    704   struct timeval end_time;
    705   timeradd(&current_time, &delta, &end_time);
    706 
    707   struct timespec ts;
    708   TIMEVAL_TO_TIMESPEC(&end_time, &ts);
    709 
    710   int to = ts.tv_sec;
    711 
    712   while (true) {
    713     int result = sem_trywait(&sem_);
    714     if (result == 0) return true;  // Successfully got semaphore.
    715     if (!to) return false;  // Timeout.
    716     CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
    717     usleep(ts.tv_nsec / 1000);
    718     to--;
    719   }
    720 }
    721 
// Factory for the platform semaphore; caller owns the returned object.
Semaphore* OS::CreateSemaphore(int count) {
  return new OpenBSDSemaphore(count);
}
    725 
    726 
// Returns the calling thread's pthread handle (used to target SIGPROF).
static pthread_t GetThreadID() {
  return pthread_self();
}
    730 
// SIGPROF handler: captures a TickSample (VM state plus pc/sp/fp pulled out
// of the signal ucontext) and feeds it to the active sampler. Extraction of
// the registers is machine- and OS-specific, hence the preprocessor maze.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;  // Fall back to a stack-local sample.

  // Extracting the sample from the context is extremely machine dependent.
  sample->state = isolate->current_vm_state();
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#ifdef __NetBSD__
  // NetBSD exposes registers through mcontext __gregs.
  mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH
#else  // OpenBSD
  // OpenBSD exposes registers directly as sc_* fields on the sigcontext.
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
  sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
  sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
  sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
  sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH
#endif  // __NetBSD__
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
    779 
    780 
// Per-sampler platform state: remembers the pthread handle of the VM thread
// so the SignalSender can deliver SIGPROF to it.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;  // Captured at construction time on the VM thread.
};
    790 
    791 
    792 class SignalSender : public Thread {
    793  public:
    794   enum SleepInterval {
    795     HALF_INTERVAL,
    796     FULL_INTERVAL
    797   };
    798 
    799   static const int kSignalSenderStackSize = 64 * KB;
    800 
    801   explicit SignalSender(int interval)
    802       : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
    803         vm_tgid_(getpid()),
    804         interval_(interval) {}
    805 
    806   static void InstallSignalHandler() {
    807     struct sigaction sa;
    808     sa.sa_sigaction = ProfilerSignalHandler;
    809     sigemptyset(&sa.sa_mask);
    810     sa.sa_flags = SA_RESTART | SA_SIGINFO;
    811     signal_handler_installed_ =
    812         (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
    813   }
    814 
    815   static void RestoreSignalHandler() {
    816     if (signal_handler_installed_) {
    817       sigaction(SIGPROF, &old_signal_handler_, 0);
    818       signal_handler_installed_ = false;
    819     }
    820   }
    821 
    822   static void AddActiveSampler(Sampler* sampler) {
    823     ScopedLock lock(mutex_.Pointer());
    824     SamplerRegistry::AddActiveSampler(sampler);
    825     if (instance_ == NULL) {
    826       // Start a thread that will send SIGPROF signal to VM threads,
    827       // when CPU profiling will be enabled.
    828       instance_ = new SignalSender(sampler->interval());
    829       instance_->Start();
    830     } else {
    831       ASSERT(instance_->interval_ == sampler->interval());
    832     }
    833   }
    834 
    835   static void RemoveActiveSampler(Sampler* sampler) {
    836     ScopedLock lock(mutex_.Pointer());
    837     SamplerRegistry::RemoveActiveSampler(sampler);
    838     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
    839       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
    840       delete instance_;
    841       instance_ = NULL;
    842       RestoreSignalHandler();
    843     }
    844   }
    845 
  // Implement Thread::Run(). Main loop of the sender thread: spins until
  // the last sampler is removed, installing/removing the SIGPROF handler
  // to track whether CPU profiling is currently on, and ticking the CPU
  // profiler and/or the runtime profiler at the configured interval.
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // Keep the signal handler's installed state in sync with whether
      // any sampler is actually CPU-profiling right now.
      if (cpu_profiling_enabled && !signal_handler_installed_) {
        InstallSignalHandler();
      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
        RestoreSignalHandler();
      }
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        // Rate limiter may park this thread; on resume, re-read state.
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled && runtime_profiler_enabled) {
        // Both profilers active: interleave them at half intervals so
        // each still fires once per full interval.
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
        Sleep(HALF_INTERVAL);
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
        Sleep(HALF_INTERVAL);
      } else {
        // At most one profiler active: tick it (if any), then sleep a
        // full interval.
        if (cpu_profiling_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
                                                      this)) {
            return;
          }
        }
        if (runtime_profiler_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
                                                      NULL)) {
            return;
          }
        }
        Sleep(FULL_INTERVAL);
      }
    }
  }
    890 
    891   static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    892     if (!sampler->IsProfiling()) return;
    893     SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    894     sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
    895   }
    896 
    897   static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    898     if (!sampler->isolate()->IsInitialized()) return;
    899     sampler->isolate()->runtime_profiler()->NotifyTick();
    900   }
    901 
    902   void SendProfilingSignal(pthread_t tid) {
    903     if (!signal_handler_installed_) return;
    904     pthread_kill(tid, SIGPROF);
    905   }
    906 
    907   void Sleep(SleepInterval full_or_half) {
    908     // Convert ms to us and subtract 100 us to compensate delays
    909     // occuring during signal delivery.
    910     useconds_t interval = interval_ * 1000 - 100;
    911     if (full_or_half == HALF_INTERVAL) interval /= 2;
    912     int result = usleep(interval);
    913 #ifdef DEBUG
    914     if (result != 0 && errno != EINTR) {
    915       fprintf(stderr,
    916               "SignalSender usleep error; interval = %u, errno = %d\n",
    917               interval,
    918               errno);
    919       ASSERT(result == 0 || errno == EINTR);
    920     }
    921 #endif
    922     USE(result);
    923   }
    924 
  const int vm_tgid_;  // Process (thread group) id — unused in this view; TODO confirm use.
  const int interval_;  // Sampling interval in ms, shared by all samplers.
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static LazyMutex mutex_;
  static SignalSender* instance_;  // The single sender thread, or NULL.
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;  // Disposition to restore.

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
    938 
    939 
// Definitions of SignalSender's process-wide static state.
LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
    944 
    945 
// Create a sampler for |isolate| ticking every |interval| ms. The
// sampler starts out inactive and not profiling; Start() activates it.
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}
    954 
    955 
// The sampler must be stopped (Stop()) before it is destroyed.
Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}
    960 
    961 
// Activate this sampler and register it with the process-wide sender
// thread (starting that thread if this is the first active sampler).
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}
    967 
    968 
// Unregister this sampler from the sender thread, then deactivate.
// Removing before clearing the active flag mirrors Start()'s ordering.
void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}
    974 
    975 
    976 } }  // namespace v8::internal
    977