      1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
      28 // Platform-specific code for Linux goes here. For the POSIX-compatible parts,
      29 // the implementation is in platform-posix.cc.
     30 
     31 #include <pthread.h>
     32 #include <semaphore.h>
     33 #include <signal.h>
     34 #include <sys/prctl.h>
     35 #include <sys/time.h>
     36 #include <sys/resource.h>
     37 #include <sys/syscall.h>
     38 #include <sys/types.h>
     39 #include <stdlib.h>
     40 
     41 // Ubuntu Dapper requires memory pages to be marked as
      42 // executable. Otherwise, the OS raises an exception when executing code
     43 // in that page.
     44 #include <sys/types.h>  // mmap & munmap
     45 #include <sys/mman.h>   // mmap & munmap
     46 #include <sys/stat.h>   // open
     47 #include <fcntl.h>      // open
     48 #include <unistd.h>     // sysconf
     49 #ifdef __GLIBC__
     50 #include <execinfo.h>   // backtrace, backtrace_symbols
     51 #endif  // def __GLIBC__
     52 #include <strings.h>    // index
     53 #include <errno.h>
     54 #include <stdarg.h>
     55 
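         // <sys/mman.h> defines a MAP_TYPE macro (the mask for the mapping-type bits),
         // which would collide with V8's MAP_TYPE instance type, so drop the macro
         // before including the V8 headers below.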
     56 #undef MAP_TYPE
     57 
     58 #include "v8.h"
     59 
     60 #include "platform.h"
     61 #include "v8threads.h"
     62 #include "vm-state-inl.h"
     63 
     64 
     65 namespace v8 {
     66 namespace internal {
     67 
     68 // 0 is never a valid thread id on Linux since tids and pids share a
     69 // name space and pid 0 is reserved (see man 2 kill).
     70 static const pthread_t kNoThread = (pthread_t) 0;
     71 
     72 
     73 double ceiling(double x) {
     74   return ceil(x);
     75 }
     76 
     77 
     78 static Mutex* limit_mutex = NULL;
     79 
     80 
     81 void OS::Setup() {
     82   // Seed the random number generator.
     83   // Convert the current time to a 64-bit integer first, before converting it
     84   // to an unsigned. Going directly can cause an overflow and the seed to be
     85   // set to all ones. The seed will be identical for different instances that
     86   // call this setup code within the same millisecond.
     87   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
     88   srandom(static_cast<unsigned int>(seed));
     89   limit_mutex = CreateMutex();
     90 }
     91 
     92 
     93 uint64_t OS::CpuFeaturesImpliedByPlatform() {
     94 #if (defined(__VFP_FP__) && !defined(__SOFTFP__))
     95   // Here gcc is telling us that we are on an ARM and gcc is assuming
     96   // that we have VFP3 instructions.  If gcc can assume it then so can
     97   // we. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
     98   return 1u << VFP3 | 1u << ARMv7;
     99 #elif CAN_USE_ARMV7_INSTRUCTIONS
    100   return 1u << ARMv7;
     101 #elif (defined(__mips_hard_float) && __mips_hard_float != 0)
     102   // Here gcc is telling us that we are on a MIPS and gcc is assuming that we
     103   // have FPU instructions.  If gcc can assume it then so can we.
     104   return 1u << FPU;
    105 #else
    106   return 0;  // Linux runs on anything.
    107 #endif
    108 }
    109 
    110 
    111 #ifdef __arm__
     112 static bool CPUInfoContainsString(const char* search_string) {
    113   const char* file_name = "/proc/cpuinfo";
     114   // This is written as a straightforward one-pass parser rather than
     115   // using STL string and ifstream because, on Linux, it is reading
     116   // from a (non-mmap-able)
     117   // character special device.
    118   FILE* f = NULL;
    119   const char* what = search_string;
    120 
    121   if (NULL == (f = fopen(file_name, "r")))
    122     return false;
    123 
    124   int k;
    125   while (EOF != (k = fgetc(f))) {
    126     if (k == *what) {
    127       ++what;
    128       while ((*what != '\0') && (*what == fgetc(f))) {
    129         ++what;
    130       }
    131       if (*what == '\0') {
    132         fclose(f);
    133         return true;
    134       } else {
    135         what = search_string;
    136       }
    137     }
    138   }
    139   fclose(f);
    140 
    141   // Did not find string in the proc file.
    142   return false;
    143 }
    144 
    145 bool OS::ArmCpuHasFeature(CpuFeature feature) {
    146   const char* search_string = NULL;
    147   // Simple detection of VFP at runtime for Linux.
    148   // It is based on /proc/cpuinfo, which reveals hardware configuration
    149   // to user-space applications.  According to ARM (mid 2009), no similar
    150   // facility is universally available on the ARM architectures,
    151   // so it's up to individual OSes to provide such.
    152   switch (feature) {
    153     case VFP3:
    154       search_string = "vfpv3";
    155       break;
    156     case ARMv7:
    157       search_string = "ARMv7";
    158       break;
    159     default:
    160       UNREACHABLE();
    161   }
    162 
    163   if (CPUInfoContainsString(search_string)) {
    164     return true;
    165   }
    166 
    167   if (feature == VFP3) {
    168     // Some old kernels will report vfp not vfpv3. Here we make a last attempt
    169     // to detect vfpv3 by checking for vfp *and* neon, since neon is only
    170     // available on architectures with vfpv3.
    171     // Checking neon on its own is not enough as it is possible to have neon
    172     // without vfp.
    173     if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
    174       return true;
    175     }
    176   }
    177 
    178   return false;
    179 }
    180 #endif  // def __arm__
    181 
    182 
    183 #ifdef __mips__
    184 bool OS::MipsCpuHasFeature(CpuFeature feature) {
    185   const char* search_string = NULL;
    186   const char* file_name = "/proc/cpuinfo";
    187   // Simple detection of FPU at runtime for Linux.
    188   // It is based on /proc/cpuinfo, which reveals hardware configuration
    189   // to user-space applications.  According to MIPS (early 2010), no similar
    190   // facility is universally available on the MIPS architectures,
    191   // so it's up to individual OSes to provide such.
    192   //
     193   // This is written as a straightforward one-pass parser rather than
     194   // using STL string and ifstream because, on Linux, it is reading
     195   // from a (non-mmap-able)
     196   // character special device.
    197 
    198   switch (feature) {
    199     case FPU:
    200       search_string = "FPU";
    201       break;
    202     default:
    203       UNREACHABLE();
    204   }
    205 
    206   FILE* f = NULL;
    207   const char* what = search_string;
    208 
    209   if (NULL == (f = fopen(file_name, "r")))
    210     return false;
    211 
    212   int k;
    213   while (EOF != (k = fgetc(f))) {
    214     if (k == *what) {
    215       ++what;
    216       while ((*what != '\0') && (*what == fgetc(f))) {
    217         ++what;
    218       }
    219       if (*what == '\0') {
    220         fclose(f);
    221         return true;
    222       } else {
    223         what = search_string;
    224       }
    225     }
    226   }
    227   fclose(f);
    228 
    229   // Did not find string in the proc file.
    230   return false;
    231 }
    232 #endif  // def __mips__
    233 
    234 
    235 int OS::ActivationFrameAlignment() {
    236 #ifdef V8_TARGET_ARCH_ARM
    237   // On EABI ARM targets this is required for fp correctness in the
    238   // runtime system.
    239   return 8;
    240 #elif V8_TARGET_ARCH_MIPS
    241   return 8;
    242 #endif
    243   // With gcc 4.4 the tree vectorization optimizer can generate code
    244   // that requires 16 byte alignment such as movdqa on x86.
    245   return 16;
    246 }
    247 
    248 
    249 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
    250 #if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
    251     (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
    252   // Only use on ARM or MIPS hardware.
    253   MemoryBarrier();
    254 #else
    255   __asm__ __volatile__("" : : : "memory");
    256   // An x86 store acts as a release barrier.
    257 #endif
    258   *ptr = value;
    259 }
    260 
    261 
    262 const char* OS::LocalTimezone(double time) {
    263   if (isnan(time)) return "";
    264   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
    265   struct tm* t = localtime(&tv);
    266   if (NULL == t) return "";
    267   return t->tm_zone;
    268 }
    269 
    270 
    271 double OS::LocalTimeOffset() {
    272   time_t tv = time(NULL);
    273   struct tm* t = localtime(&tv);
     274   // tm_gmtoff includes any daylight saving offset, so subtract it.
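           // For example, during Central European Summer Time tm_gmtoff is 7200 s and
           // tm_isdst > 0, so the result is 7200000 - 3600000 = 3600000 ms (UTC+1).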
    275   return static_cast<double>(t->tm_gmtoff * msPerSecond -
    276                              (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
    277 }
    278 
    279 
    280 // We keep the lowest and highest addresses mapped as a quick way of
    281 // determining that pointers are outside the heap (used mostly in assertions
     282 // and verification).  The estimate is conservative, i.e., not all addresses in
     283 // 'allocated' space are actually allocated to our heap.  The range is
     284 // [lowest, highest), inclusive on the low end and exclusive on the high end.
    285 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
    286 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
    287 
    288 
    289 static void UpdateAllocatedSpaceLimits(void* address, int size) {
    290   ASSERT(limit_mutex != NULL);
    291   ScopedLock lock(limit_mutex);
    292 
    293   lowest_ever_allocated = Min(lowest_ever_allocated, address);
    294   highest_ever_allocated =
    295       Max(highest_ever_allocated,
    296           reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
    297 }
    298 
    299 
    300 bool OS::IsOutsideAllocatedSpace(void* address) {
    301   return address < lowest_ever_allocated || address >= highest_ever_allocated;
    302 }
    303 
    304 
    305 size_t OS::AllocateAlignment() {
    306   return sysconf(_SC_PAGESIZE);
    307 }
    308 
    309 
    310 void* OS::Allocate(const size_t requested,
    311                    size_t* allocated,
    312                    bool is_executable) {
    313   // TODO(805): Port randomization of allocated executable memory to Linux.
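           // Round the request up to whole pages: mmap maps whole pages anyway, and
           // *allocated should reflect the size that is actually reserved.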
    314   const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
    315   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    316   void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    317   if (mbase == MAP_FAILED) {
    318     LOG(i::Isolate::Current(),
    319         StringEvent("OS::Allocate", "mmap failed"));
    320     return NULL;
    321   }
    322   *allocated = msize;
    323   UpdateAllocatedSpaceLimits(mbase, msize);
    324   return mbase;
    325 }
    326 
    327 
    328 void OS::Free(void* address, const size_t size) {
    329   // TODO(1240712): munmap has a return value which is ignored here.
    330   int result = munmap(address, size);
    331   USE(result);
    332   ASSERT(result == 0);
    333 }
    334 
    335 
    336 #ifdef ENABLE_HEAP_PROTECTION
    337 
    338 void OS::Protect(void* address, size_t size) {
    339   // TODO(1240712): mprotect has a return value which is ignored here.
    340   mprotect(address, size, PROT_READ);
    341 }
    342 
    343 
    344 void OS::Unprotect(void* address, size_t size, bool is_executable) {
    345   // TODO(1240712): mprotect has a return value which is ignored here.
    346   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    347   mprotect(address, size, prot);
    348 }
    349 
    350 #endif
    351 
    352 
    353 void OS::Sleep(int milliseconds) {
    354   unsigned int ms = static_cast<unsigned int>(milliseconds);
    355   usleep(1000 * ms);
    356 }
    357 
    358 
    359 void OS::Abort() {
    360   // Redirect to std abort to signal abnormal program termination.
    361   abort();
    362 }
    363 
    364 
    365 void OS::DebugBreak() {
    366 // TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
    367 //  which is the architecture of generated code).
    368 #if (defined(__arm__) || defined(__thumb__))
    369 # if defined(CAN_USE_ARMV5_INSTRUCTIONS)
    370   asm("bkpt 0");
    371 # endif
    372 #elif defined(__mips__)
    373   asm("break");
    374 #else
    375   asm("int $3");
    376 #endif
    377 }
    378 
    379 
    380 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
    381  public:
    382   PosixMemoryMappedFile(FILE* file, void* memory, int size)
    383     : file_(file), memory_(memory), size_(size) { }
    384   virtual ~PosixMemoryMappedFile();
    385   virtual void* memory() { return memory_; }
    386   virtual int size() { return size_; }
    387  private:
    388   FILE* file_;
    389   void* memory_;
    390   int size_;
    391 };
    392 
    393 
    394 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
    395   FILE* file = fopen(name, "r+");
    396   if (file == NULL) return NULL;
    397 
    398   fseek(file, 0, SEEK_END);
    399   int size = ftell(file);
    400 
    401   void* memory =
    402       mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
    403   return new PosixMemoryMappedFile(file, memory, size);
    404 }
    405 
    406 
    407 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    408     void* initial) {
    409   FILE* file = fopen(name, "w+");
    410   if (file == NULL) return NULL;
    411   int result = fwrite(initial, size, 1, file);
    412   if (result < 1) {
    413     fclose(file);
    414     return NULL;
    415   }
    416   void* memory =
    417       mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
    418   return new PosixMemoryMappedFile(file, memory, size);
    419 }
    420 
    421 
    422 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
    423   if (memory_) munmap(memory_, size_);
    424   fclose(file_);
    425 }
    426 
    427 
    428 void OS::LogSharedLibraryAddresses() {
    429 #ifdef ENABLE_LOGGING_AND_PROFILING
    430   // This function assumes that the layout of the file is as follows:
    431   // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
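           //   (e.g. "08048000-08056000 r-xp 00000000 08:01 130704   /bin/cat")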
    432   // If we encounter an unexpected situation we abort scanning further entries.
    433   FILE* fp = fopen("/proc/self/maps", "r");
    434   if (fp == NULL) return;
    435 
    436   // Allocate enough room to be able to store a full file name.
    437   const int kLibNameLen = FILENAME_MAX + 1;
    438   char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
    439 
    440   i::Isolate* isolate = ISOLATE;
    441   // This loop will terminate once the scanning hits an EOF.
    442   while (true) {
    443     uintptr_t start, end;
    444     char attr_r, attr_w, attr_x, attr_p;
    445     // Parse the addresses and permission bits at the beginning of the line.
    446     if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    447     if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
    448 
    449     int c;
    450     if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
    451       // Found a read-only executable entry. Skip characters until we reach
    452       // the beginning of the filename or the end of the line.
    453       do {
    454         c = getc(fp);
    455       } while ((c != EOF) && (c != '\n') && (c != '/'));
    456       if (c == EOF) break;  // EOF: Was unexpected, just exit.
    457 
    458       // Process the filename if found.
    459       if (c == '/') {
    460         ungetc(c, fp);  // Push the '/' back into the stream to be read below.
    461 
    462         // Read to the end of the line. Exit if the read fails.
    463         if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
    464 
    465         // Drop the newline character read by fgets. We do not need to check
    466         // for a zero-length string because we know that we at least read the
    467         // '/' character.
    468         lib_name[strlen(lib_name) - 1] = '\0';
    469       } else {
    470         // No library name found, just record the raw address range.
    471         snprintf(lib_name, kLibNameLen,
    472                  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
    473       }
    474       LOG(isolate, SharedLibraryEvent(lib_name, start, end));
    475     } else {
     476       // Entry not describing executable data. Skip to end of line to set up
    477       // reading the next entry.
    478       do {
    479         c = getc(fp);
    480       } while ((c != EOF) && (c != '\n'));
    481       if (c == EOF) break;
    482     }
    483   }
    484   free(lib_name);
    485   fclose(fp);
    486 #endif
    487 }
    488 
    489 
    490 static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
    491 
    492 
    493 void OS::SignalCodeMovingGC() {
    494 #ifdef ENABLE_LOGGING_AND_PROFILING
    495   // Support for ll_prof.py.
    496   //
    497   // The Linux profiler built into the kernel logs all mmap's with
    498   // PROT_EXEC so that analysis tools can properly attribute ticks. We
    499   // do a mmap with a name known by ll_prof.py and immediately munmap
    500   // it. This injects a GC marker into the stream of events generated
    501   // by the kernel and allows us to synchronize V8 code log and the
    502   // kernel log.
    503   int size = sysconf(_SC_PAGESIZE);
     504   FILE* f = fopen(kGCFakeMmap, "w+");
           if (f == NULL) return;  // Could not create the marker file; skip the marker.
    505   void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
    506                     fileno(f), 0);
    507   ASSERT(addr != MAP_FAILED);
    508   munmap(addr, size);
    509   fclose(f);
    510 #endif
    511 }
    512 
    513 
    514 int OS::StackWalk(Vector<OS::StackFrame> frames) {
    515   // backtrace is a glibc extension.
    516 #ifdef __GLIBC__
    517   int frames_size = frames.length();
    518   ScopedVector<void*> addresses(frames_size);
    519 
    520   int frames_count = backtrace(addresses.start(), frames_size);
    521 
    522   char** symbols = backtrace_symbols(addresses.start(), frames_count);
    523   if (symbols == NULL) {
    524     return kStackWalkError;
    525   }
    526 
    527   for (int i = 0; i < frames_count; i++) {
    528     frames[i].address = addresses[i];
    529     // Format a text representation of the frame based on the information
    530     // available.
    531     SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
    532              "%s",
    533              symbols[i]);
    534     // Make sure line termination is in place.
    535     frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
    536   }
    537 
    538   free(symbols);
    539 
    540   return frames_count;
    541 #else  // ndef __GLIBC__
    542   return 0;
    543 #endif  // ndef __GLIBC__
    544 }
    545 
    546 
    547 // Constants used for mmap.
    548 static const int kMmapFd = -1;
    549 static const int kMmapFdOffset = 0;
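         // By convention, anonymous (MAP_ANONYMOUS) mappings pass fd == -1 and
         // offset == 0.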
    550 
    551 
    552 VirtualMemory::VirtualMemory(size_t size) {
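           // Reserve a range of the address space without committing backing store:
           // PROT_NONE keeps the pages inaccessible and MAP_NORESERVE avoids charging
           // swap for them; Commit() later remaps subranges with real protections.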
    553   address_ = mmap(NULL, size, PROT_NONE,
    554                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
    555                   kMmapFd, kMmapFdOffset);
    556   size_ = size;
    557 }
    558 
    559 
    560 VirtualMemory::~VirtualMemory() {
    561   if (IsReserved()) {
    562     if (0 == munmap(address(), size())) address_ = MAP_FAILED;
    563   }
    564 }
    565 
    566 
    567 bool VirtualMemory::IsReserved() {
    568   return address_ != MAP_FAILED;
    569 }
    570 
    571 
    572 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
    573   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
    574   if (MAP_FAILED == mmap(address, size, prot,
    575                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
    576                          kMmapFd, kMmapFdOffset)) {
    577     return false;
    578   }
    579 
    580   UpdateAllocatedSpaceLimits(address, size);
    581   return true;
    582 }
    583 
    584 
    585 bool VirtualMemory::Uncommit(void* address, size_t size) {
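           // Remap the range with fresh anonymous PROT_NONE pages, discarding the old
           // contents and returning it to the reserved-but-uncommitted state.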
    586   return mmap(address, size, PROT_NONE,
    587               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
    588               kMmapFd, kMmapFdOffset) != MAP_FAILED;
    589 }
    590 
    591 
    592 class Thread::PlatformData : public Malloced {
    593  public:
    594   PlatformData() : thread_(kNoThread) {}
    595 
    596   pthread_t thread_;  // Thread handle for pthread.
    597 };
    598 
    599 Thread::Thread(Isolate* isolate, const Options& options)
    600     : data_(new PlatformData()),
    601       isolate_(isolate),
    602       stack_size_(options.stack_size) {
    603   set_name(options.name);
    604 }
    605 
    606 
    607 Thread::Thread(Isolate* isolate, const char* name)
    608     : data_(new PlatformData()),
    609       isolate_(isolate),
    610       stack_size_(0) {
    611   set_name(name);
    612 }
    613 
    614 
    615 Thread::~Thread() {
    616   delete data_;
    617 }
    618 
    619 
    620 static void* ThreadEntry(void* arg) {
    621   Thread* thread = reinterpret_cast<Thread*>(arg);
     622   // Set the thread name (visible to debugging tools), and record the pthread
     623   // handle; the handle is also set via the first argument to pthread_create(),
     624   // but we don't know which thread runs first, so initialize it here too.
    625   prctl(PR_SET_NAME,
    626         reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
    627         0, 0, 0);
    628   thread->data()->thread_ = pthread_self();
    629   ASSERT(thread->data()->thread_ != kNoThread);
    630   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
    631   thread->Run();
    632   return NULL;
    633 }
    634 
    635 
    636 void Thread::set_name(const char* name) {
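           // strncpy() does not NUL-terminate when the source is longer than the
           // buffer, so always terminate explicitly.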
    637   strncpy(name_, name, sizeof(name_));
    638   name_[sizeof(name_) - 1] = '\0';
    639 }
    640 
    641 
    642 void Thread::Start() {
    643   pthread_attr_t* attr_ptr = NULL;
    644   pthread_attr_t attr;
    645   if (stack_size_ > 0) {
    646     pthread_attr_init(&attr);
    647     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    648     attr_ptr = &attr;
    649   }
    650   pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
    651   ASSERT(data_->thread_ != kNoThread);
    652 }
    653 
    654 
    655 void Thread::Join() {
    656   pthread_join(data_->thread_, NULL);
    657 }
    658 
    659 
    660 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
    661   pthread_key_t key;
    662   int result = pthread_key_create(&key, NULL);
    663   USE(result);
    664   ASSERT(result == 0);
    665   return static_cast<LocalStorageKey>(key);
    666 }
    667 
    668 
    669 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
    670   pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
    671   int result = pthread_key_delete(pthread_key);
    672   USE(result);
    673   ASSERT(result == 0);
    674 }
    675 
    676 
    677 void* Thread::GetThreadLocal(LocalStorageKey key) {
    678   pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
    679   return pthread_getspecific(pthread_key);
    680 }
    681 
    682 
    683 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
    684   pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
    685   pthread_setspecific(pthread_key, value);
    686 }
    687 
    688 
    689 void Thread::YieldCPU() {
    690   sched_yield();
    691 }
    692 
    693 
    694 class LinuxMutex : public Mutex {
    695  public:
    696 
    697   LinuxMutex() {
    698     pthread_mutexattr_t attrs;
    699     int result = pthread_mutexattr_init(&attrs);
    700     ASSERT(result == 0);
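             // PTHREAD_MUTEX_RECURSIVE lets the owning thread lock the mutex again
             // without deadlocking.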
    701     result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    702     ASSERT(result == 0);
    703     result = pthread_mutex_init(&mutex_, &attrs);
    704     ASSERT(result == 0);
    705   }
    706 
    707   virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
    708 
    709   virtual int Lock() {
    710     int result = pthread_mutex_lock(&mutex_);
    711     return result;
    712   }
    713 
    714   virtual int Unlock() {
    715     int result = pthread_mutex_unlock(&mutex_);
    716     return result;
    717   }
    718 
    719   virtual bool TryLock() {
    720     int result = pthread_mutex_trylock(&mutex_);
    721     // Return false if the lock is busy and locking failed.
    722     if (result == EBUSY) {
    723       return false;
    724     }
    725     ASSERT(result == 0);  // Verify no other errors.
    726     return true;
    727   }
    728 
    729  private:
    730   pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
    731 };
    732 
    733 
    734 Mutex* OS::CreateMutex() {
    735   return new LinuxMutex();
    736 }
    737 
    738 
    739 class LinuxSemaphore : public Semaphore {
    740  public:
    741   explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
    742   virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
    743 
    744   virtual void Wait();
    745   virtual bool Wait(int timeout);
    746   virtual void Signal() { sem_post(&sem_); }
    747  private:
    748   sem_t sem_;
    749 };
    750 
    751 
    752 void LinuxSemaphore::Wait() {
    753   while (true) {
    754     int result = sem_wait(&sem_);
    755     if (result == 0) return;  // Successfully got semaphore.
    756     CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
    757   }
    758 }
    759 
    760 
    761 #ifndef TIMEVAL_TO_TIMESPEC
    762 #define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    763     (ts)->tv_sec = (tv)->tv_sec;                                    \
    764     (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
    765 } while (false)
    766 #endif
    767 
    768 
    769 bool LinuxSemaphore::Wait(int timeout) {
    770   const long kOneSecondMicros = 1000000;  // NOLINT
    771 
    772   // Split timeout into second and nanosecond parts.
    773   struct timeval delta;
    774   delta.tv_usec = timeout % kOneSecondMicros;
    775   delta.tv_sec = timeout / kOneSecondMicros;
    776 
    777   struct timeval current_time;
    778   // Get the current time.
    779   if (gettimeofday(&current_time, NULL) == -1) {
    780     return false;
    781   }
    782 
    783   // Calculate time for end of timeout.
    784   struct timeval end_time;
    785   timeradd(&current_time, &delta, &end_time);
    786 
    787   struct timespec ts;
    788   TIMEVAL_TO_TIMESPEC(&end_time, &ts);
     789   // Wait for the semaphore to be signalled or for the timeout to expire.
    790   while (true) {
    791     int result = sem_timedwait(&sem_, &ts);
    792     if (result == 0) return true;  // Successfully got semaphore.
    793     if (result > 0) {
    794       // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
    795       errno = result;
    796       result = -1;
    797     }
    798     if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    799     CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
    800   }
    801 }
    802 
    803 
    804 Semaphore* OS::CreateSemaphore(int count) {
    805   return new LinuxSemaphore(count);
    806 }
    807 
    808 
    809 #ifdef ENABLE_LOGGING_AND_PROFILING
    810 
    811 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
    812 // Android runs a fairly new Linux kernel, so signal info is there,
    813 // but the C library doesn't have the structs defined.
    814 
    815 struct sigcontext {
    816   uint32_t trap_no;
    817   uint32_t error_code;
    818   uint32_t oldmask;
    819   uint32_t gregs[16];
    820   uint32_t arm_cpsr;
    821   uint32_t fault_address;
    822 };
    823 typedef uint32_t __sigset_t;
    824 typedef struct sigcontext mcontext_t;
    825 typedef struct ucontext {
    826   uint32_t uc_flags;
    827   struct ucontext* uc_link;
    828   stack_t uc_stack;
    829   mcontext_t uc_mcontext;
    830   __sigset_t uc_sigmask;
    831 } ucontext_t;
    832 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
    833 
    834 #endif
    835 
    836 
    837 static int GetThreadID() {
    838   // Glibc doesn't provide a wrapper for gettid(2).
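           // The raw syscall returns the kernel thread id, which is what the tgkill()
           // call in SignalSender::SendProfilingSignal() expects.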
    839 #if defined(ANDROID)
    840   return syscall(__NR_gettid);
    841 #else
    842   return syscall(SYS_gettid);
    843 #endif
    844 }
    845 
    846 
    847 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
    848 #ifndef V8_HOST_ARCH_MIPS
    849   USE(info);
    850   if (signal != SIGPROF) return;
    851   Isolate* isolate = Isolate::UncheckedCurrent();
    852   if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    853     // We require a fully initialized and entered isolate.
    854     return;
    855   }
    856   if (v8::Locker::IsActive() &&
    857       !isolate->thread_manager()->IsLockedByCurrentThread()) {
    858     return;
    859   }
    860 
    861   Sampler* sampler = isolate->logger()->sampler();
    862   if (sampler == NULL || !sampler->IsActive()) return;
    863 
    864   TickSample sample_obj;
    865   TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
    866   if (sample == NULL) sample = &sample_obj;
    867 
    868   // Extracting the sample from the context is extremely machine dependent.
    869   ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
    870   mcontext_t& mcontext = ucontext->uc_mcontext;
    871   sample->state = isolate->current_vm_state();
    872 #if V8_HOST_ARCH_IA32
    873   sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
    874   sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
    875   sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
    876 #elif V8_HOST_ARCH_X64
    877   sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
    878   sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
    879   sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
    880 #elif V8_HOST_ARCH_ARM
    881 // An undefined macro evaluates to 0, so this applies to Android's Bionic also.
    882 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    883   sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
    884   sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
    885   sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
    886 #else
    887   sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
    888   sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
    889   sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
    890 #endif
    891 #elif V8_HOST_ARCH_MIPS
     892   sample->pc = reinterpret_cast<Address>(mcontext.pc);
     893   sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
     894   sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
    895 #endif
    896   sampler->SampleStack(sample);
    897   sampler->Tick(sample);
    898 #endif
    899 }
    900 
    901 
    902 class Sampler::PlatformData : public Malloced {
    903  public:
    904   PlatformData() : vm_tid_(GetThreadID()) {}
    905 
    906   int vm_tid() const { return vm_tid_; }
    907 
    908  private:
    909   const int vm_tid_;
    910 };
    911 
    912 
    913 class SignalSender : public Thread {
    914  public:
    915   enum SleepInterval {
    916     HALF_INTERVAL,
    917     FULL_INTERVAL
    918   };
    919 
    920   explicit SignalSender(int interval)
    921       : Thread(NULL, "SignalSender"),
    922         vm_tgid_(getpid()),
    923         interval_(interval) {}
    924 
    925   static void InstallSignalHandler() {
    926     struct sigaction sa;
    927     sa.sa_sigaction = ProfilerSignalHandler;
    928     sigemptyset(&sa.sa_mask);
    929     sa.sa_flags = SA_RESTART | SA_SIGINFO;
    930     signal_handler_installed_ =
    931         (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
    932   }
    933 
    934   static void RestoreSignalHandler() {
    935     if (signal_handler_installed_) {
    936       sigaction(SIGPROF, &old_signal_handler_, 0);
    937       signal_handler_installed_ = false;
    938     }
    939   }
    940 
    941   static void AddActiveSampler(Sampler* sampler) {
    942     ScopedLock lock(mutex_);
    943     SamplerRegistry::AddActiveSampler(sampler);
    944     if (instance_ == NULL) {
     945       // Start a thread that will send the SIGPROF signal to VM threads
     946       // when CPU profiling is enabled.
    947       instance_ = new SignalSender(sampler->interval());
    948       instance_->Start();
    949     } else {
    950       ASSERT(instance_->interval_ == sampler->interval());
    951     }
    952   }
    953 
    954   static void RemoveActiveSampler(Sampler* sampler) {
    955     ScopedLock lock(mutex_);
    956     SamplerRegistry::RemoveActiveSampler(sampler);
    957     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
    958       RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
    959       instance_->Join();
    960       delete instance_;
    961       instance_ = NULL;
    962       RestoreSignalHandler();
    963     }
    964   }
    965 
    966   // Implement Thread::Run().
    967   virtual void Run() {
    968     SamplerRegistry::State state;
    969     while ((state = SamplerRegistry::GetState()) !=
    970            SamplerRegistry::HAS_NO_SAMPLERS) {
    971       bool cpu_profiling_enabled =
    972           (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
    973       bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
    974       if (cpu_profiling_enabled && !signal_handler_installed_)
    975         InstallSignalHandler();
    976       else if (!cpu_profiling_enabled && signal_handler_installed_)
    977         RestoreSignalHandler();
     978       // When CPU profiling is enabled, both JavaScript and C++ code are
     979       // profiled. We must not suspend.
    980       if (!cpu_profiling_enabled) {
    981         if (rate_limiter_.SuspendIfNecessary()) continue;
    982       }
    983       if (cpu_profiling_enabled && runtime_profiler_enabled) {
    984         if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
    985           return;
    986         }
    987         Sleep(HALF_INTERVAL);
    988         if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
    989           return;
    990         }
    991         Sleep(HALF_INTERVAL);
    992       } else {
    993         if (cpu_profiling_enabled) {
    994           if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
    995                                                       this)) {
    996             return;
    997           }
    998         }
    999         if (runtime_profiler_enabled) {
   1000           if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
   1001                                                       NULL)) {
   1002             return;
   1003           }
   1004         }
   1005         Sleep(FULL_INTERVAL);
   1006       }
   1007     }
   1008   }
   1009 
   1010   static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
   1011     if (!sampler->IsProfiling()) return;
   1012     SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
   1013     sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
   1014   }
   1015 
   1016   static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
   1017     if (!sampler->isolate()->IsInitialized()) return;
   1018     sampler->isolate()->runtime_profiler()->NotifyTick();
   1019   }
   1020 
   1021   void SendProfilingSignal(int tid) {
   1022     if (!signal_handler_installed_) return;
   1023     // Glibc doesn't provide a wrapper for tgkill(2).
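             // tgkill() delivers SIGPROF to the specific sampled thread (vm_tgid_, tid)
             // rather than to an arbitrary thread of the process, as kill() would.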
   1024 #if defined(ANDROID)
   1025     syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
   1026 #else
   1027     syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
   1028 #endif
   1029   }
   1030 
   1031   void Sleep(SleepInterval full_or_half) {
    1032     // Convert ms to us and subtract 100 us to compensate for delays
    1033     // occurring during signal delivery.
   1034     useconds_t interval = interval_ * 1000 - 100;
   1035     if (full_or_half == HALF_INTERVAL) interval /= 2;
   1036     int result = usleep(interval);
   1037 #ifdef DEBUG
   1038     if (result != 0 && errno != EINTR) {
   1039       fprintf(stderr,
   1040               "SignalSender usleep error; interval = %u, errno = %d\n",
   1041               interval,
   1042               errno);
   1043       ASSERT(result == 0 || errno == EINTR);
   1044     }
   1045 #endif
   1046     USE(result);
   1047   }
   1048 
   1049   const int vm_tgid_;
   1050   const int interval_;
   1051   RuntimeProfilerRateLimiter rate_limiter_;
   1052 
   1053   // Protects the process wide state below.
   1054   static Mutex* mutex_;
   1055   static SignalSender* instance_;
   1056   static bool signal_handler_installed_;
   1057   static struct sigaction old_signal_handler_;
   1058 
   1059   DISALLOW_COPY_AND_ASSIGN(SignalSender);
   1060 };
   1061 
   1062 
   1063 Mutex* SignalSender::mutex_ = OS::CreateMutex();
   1064 SignalSender* SignalSender::instance_ = NULL;
   1065 struct sigaction SignalSender::old_signal_handler_;
   1066 bool SignalSender::signal_handler_installed_ = false;
   1067 
   1068 
   1069 Sampler::Sampler(Isolate* isolate, int interval)
   1070     : isolate_(isolate),
   1071       interval_(interval),
   1072       profiling_(false),
   1073       active_(false),
   1074       samples_taken_(0) {
   1075   data_ = new PlatformData;
   1076 }
   1077 
   1078 
   1079 Sampler::~Sampler() {
   1080   ASSERT(!IsActive());
   1081   delete data_;
   1082 }
   1083 
   1084 
   1085 void Sampler::Start() {
   1086   ASSERT(!IsActive());
   1087   SetActive(true);
   1088   SignalSender::AddActiveSampler(this);
   1089 }
   1090 
   1091 
   1092 void Sampler::Stop() {
   1093   ASSERT(IsActive());
   1094   SignalSender::RemoveActiveSampler(this);
   1095   SetActive(false);
   1096 }
   1097 
   1098 #endif  // ENABLE_LOGGING_AND_PROFILING
   1099 
   1100 } }  // namespace v8::internal
   1101