      1 // Copyright 2011 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 // This module contains the platform-specific code. This makes the rest of the
     29 // code less dependent on the operating system, compiler and runtime libraries.
     30 // This module specifically does not deal with differences between processor
     31 // architectures.
     32 // The platform classes have the same definition for all platforms. The
     33 // implementation for a particular platform is put in platform_<os>.cc.
     34 // The build system then uses the implementation for the target platform.
     35 //
     36 // This design has been chosen because it is simple and fast. Alternatively,
     37 // the platform dependent classes could have been implemented using abstract
     38 // superclasses with virtual methods and having specializations for each
     39 // platform. This design was rejected because it was more complicated and
     40 // slower. It would require factory methods for selecting the right
     41 // implementation and would add virtual-call overhead to performance-sensitive
     42 // operations such as mutex locking/unlocking.
     43 
     44 #ifndef V8_PLATFORM_H_
     45 #define V8_PLATFORM_H_
     46 
     47 #ifdef __sun
     48 # ifndef signbit
     49 int signbit(double x);
     50 # endif
     51 #endif
     52 
     53 // GCC specific stuff
     54 #ifdef __GNUC__
     55 
     56 // Needed for va_list on at least MinGW and Android.
     57 #include <stdarg.h>
     58 
     59 #define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
     60 
     61 #endif  // __GNUC__
     62 
     63 
     64 // Windows specific stuff.
     65 #ifdef WIN32
     66 
     67 // Microsoft Visual C++ specific stuff.
     68 #ifdef _MSC_VER
     69 
     70 #include "win32-math.h"
     71 
     72 int strncasecmp(const char* s1, const char* s2, int n);
     73 
     74 #endif  // _MSC_VER
     75 
     76 // Random is missing on both Visual Studio and MinGW.
     77 int random();
     78 
     79 #endif  // WIN32
     80 
     81 #include "atomicops.h"
     82 #include "lazy-instance.h"
     83 #include "platform-tls.h"
     84 #include "utils.h"
     85 #include "v8globals.h"
     86 
     87 namespace v8 {
     88 namespace internal {
     89 
     90 // Use AtomicWord for a machine-sized pointer. It is assumed that
     91 // reads and writes of naturally aligned values of this type are atomic.
     92 typedef intptr_t AtomicWord;
     93 
     94 class Semaphore;
     95 class Mutex;
     96 
     97 double ceiling(double x);
     98 double modulo(double x, double y);
     99 
    100 // Custom implementations of sin, cos, tan, log and sqrt.
    101 double fast_sin(double input);
    102 double fast_cos(double input);
    103 double fast_tan(double input);
    104 double fast_log(double input);
    105 double fast_sqrt(double input);
    106 
    107 // Forward declarations.
    108 class Socket;
    109 
    110 // ----------------------------------------------------------------------------
    111 // OS
    112 //
    113 // This class has static methods for the different platform specific
    114 // functions. Add methods here to cope with differences between the
    115 // supported platforms.
    116 
    117 class OS {
    118  public:
    119   // Initializes the platform OS support. Called once at VM startup.
    120   static void SetUp();
    121 
    122   // Initializes the platform OS support that depends on CPU features. This is
    123   // called after CPU initialization.
    124   static void PostSetUp();
    125 
    126   // Returns the accumulated user time for the current thread. This routine
    127   // can be used for profiling. The implementation should
    128   // strive for high-precision timer resolution, preferably
    129   // microsecond resolution.
    130   static int GetUserTime(uint32_t* secs,  uint32_t* usecs);
    131 
    132   // Get a tick counter normalized to one tick per microsecond.
    133   // Used for calculating time intervals.
    134   static int64_t Ticks();
    135 
    136   // Returns current time as the number of milliseconds since
    137   // 00:00:00 UTC, January 1, 1970.
    138   static double TimeCurrentMillis();
    139 
    140   // Returns a string identifying the current time zone. The
    141   // timestamp is used for determining if DST is in effect.
    142   static const char* LocalTimezone(double time);
    143 
    144   // Returns the local time offset in milliseconds east of UTC without
    145   // taking daylight savings time into account.
    146   static double LocalTimeOffset();
    147 
    148   // Returns the daylight savings offset for the given time.
    149   static double DaylightSavingsOffset(double time);
    150 
    151   // Returns last OS error.
    152   static int GetLastError();
    153 
    154   static FILE* FOpen(const char* path, const char* mode);
    155   static bool Remove(const char* path);
    156 
    157   // Opens a temporary file; the file is automatically removed on close.
    158   static FILE* OpenTemporaryFile();
    159 
    160   // Log file open mode is platform-dependent due to line-ending issues.
    161   static const char* const LogFileOpenMode;
    162 
    163   // Print output to console. This is mostly used for debugging output.
    164   // On platforms that have standard terminal output, the output
    165   // should go to stdout.
    166   static void Print(const char* format, ...);
    167   static void VPrint(const char* format, va_list args);
    168 
    169   // Print output to a file. This is mostly used for debugging output.
    170   static void FPrint(FILE* out, const char* format, ...);
    171   static void VFPrint(FILE* out, const char* format, va_list args);
    172 
    173   // Print error output to console. This is mostly used for error message
    174   // output. On platforms that have standard terminal output, the output
    175   // should go to stderr.
    176   static void PrintError(const char* format, ...);
    177   static void VPrintError(const char* format, va_list args);
    178 
    179   // Allocate/Free memory used by the JS heap. Pages are readable/writable, but
    180   // they are not guaranteed to be executable unless 'is_executable' is true.
    181   // Returns the address of the allocated memory, or NULL on failure.
    182   static void* Allocate(const size_t requested,
    183                         size_t* allocated,
    184                         bool is_executable);
    185   static void Free(void* address, const size_t size);
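          // Example (illustrative sketch): a typical Allocate/Free pairing. The
          // actual size reserved is reported through 'allocated' and may be
          // rounded up to AllocateAlignment().
          //
          //   size_t allocated = 0;
          //   void* block = OS::Allocate(4096, &allocated, false);
          //   if (block != NULL) {
          //     // ... use 'allocated' bytes starting at 'block' ...
          //     OS::Free(block, allocated);
          //   }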
    186 
    187   // This is the granularity at which the ProtectCode(...) call can set page
    188   // permissions.
    189   static intptr_t CommitPageSize();
    190 
    191   // Mark code segments non-writable.
    192   static void ProtectCode(void* address, const size_t size);
    193 
    194   // Assign memory as a guard page so that access will cause an exception.
    195   static void Guard(void* address, const size_t size);
    196 
    197   // Generate a random address to be used for hinting mmap().
    198   static void* GetRandomMmapAddr();
    199 
    200   // Get the alignment guaranteed by Allocate().
    201   static size_t AllocateAlignment();
    202 
    203   // Returns an indication of whether a pointer is in a space that
    204   // has been allocated by Allocate().  This method may conservatively
    205   // always return false, but giving more accurate information may
    206   // improve the robustness of the stack dump code in the presence of
    207   // heap corruption.
    208   static bool IsOutsideAllocatedSpace(void* pointer);
    209 
    210   // Sleep for a number of milliseconds.
    211   static void Sleep(const int milliseconds);
    212 
    213   // Abort the current process.
    214   static void Abort();
    215 
    216   // Debug break.
    217   static void DebugBreak();
    218 
    219   // Walk the stack.
    220   static const int kStackWalkError = -1;
    221   static const int kStackWalkMaxNameLen = 256;
    222   static const int kStackWalkMaxTextLen = 256;
    223   struct StackFrame {
    224     void* address;
    225     char text[kStackWalkMaxTextLen];
    226   };
    227 
    228   static int StackWalk(Vector<StackFrame> frames);
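          // Example (illustrative sketch): capturing up to 8 frames of the current
          // stack. StackWalk() is assumed here to return the number of frames
          // captured, or kStackWalkError on failure.
          //
          //   OS::StackFrame frames[8];
          //   int count = OS::StackWalk(Vector<OS::StackFrame>(frames, 8));
          //   if (count != OS::kStackWalkError) {
          //     for (int i = 0; i < count; i++) OS::Print("%s\n", frames[i].text);
          //   }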
    229 
    230   // Factory method for creating platform dependent Mutex.
    231   // Please use delete to reclaim the storage for the returned Mutex.
    232   static Mutex* CreateMutex();
    233 
    234   // Factory method for creating platform dependent Semaphore.
    235   // Please use delete to reclaim the storage for the returned Semaphore.
    236   static Semaphore* CreateSemaphore(int count);
    237 
    238   // Factory method for creating platform dependent Socket.
    239   // Please use delete to reclaim the storage for the returned Socket.
    240   static Socket* CreateSocket();
    241 
    242   class MemoryMappedFile {
    243    public:
    244     static MemoryMappedFile* open(const char* name);
    245     static MemoryMappedFile* create(const char* name, int size, void* initial);
    246     virtual ~MemoryMappedFile() { }
    247     virtual void* memory() = 0;
    248     virtual int size() = 0;
    249   };
    250 
    251   // Safe formatting print. Ensures that str is always null-terminated.
    252   // Returns the number of chars written, or -1 if output was truncated.
    253   static int SNPrintF(Vector<char> str, const char* format, ...);
    254   static int VSNPrintF(Vector<char> str,
    255                        const char* format,
    256                        va_list args);
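          // Example (illustrative sketch): formatting into a stack buffer wrapped
          // in a Vector<char>. A negative return value indicates truncation.
          //
          //   char buffer[128];
          //   int written = OS::SNPrintF(Vector<char>(buffer, 128),
          //                              "%s: %d", "answer", 42);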
    257 
    258   static char* StrChr(char* str, int c);
    259   static void StrNCpy(Vector<char> dest, const char* src, size_t n);
    260 
    261   // Support for the profiler.  Can do nothing, in which case ticks
    262   // occurring in shared libraries will not be properly accounted for.
    263   static void LogSharedLibraryAddresses();
    264 
    265   // Support for the profiler.  Notifies the external profiling
    266   // process that a code moving garbage collection starts.  Can do
    267   // nothing, in which case the code objects must not move (e.g., by
    268   // using --never-compact) if accurate profiling is desired.
    269   static void SignalCodeMovingGC();
    270 
    271   // The return value indicates the CPU features we are sure of because of the
    272   // OS.  For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
    273   // instructions.
    274   // This is a little messy because the interpretation depends on the
    275   // combination of CPU and OS.  The bits in the answer correspond to the bit
    276   // positions indicated by the members of the CpuFeature enum from globals.h.
    277   static uint64_t CpuFeaturesImpliedByPlatform();
    278 
    279   // Maximum size of the virtual memory.  0 means there is no artificial
    280   // limit.
    281   static intptr_t MaxVirtualMemory();
    282 
    283   // Returns the double constant NaN.
    284   static double nan_value();
    285 
    286   // Support runtime detection of VFP3 on ARM CPUs.
    287   static bool ArmCpuHasFeature(CpuFeature feature);
    288 
    289   // Support runtime detection of whether the hard float option of the
    290   // EABI is used.
    291   static bool ArmUsingHardFloat();
    292 
    293   // Support runtime detection of FPU on MIPS CPUs.
    294   static bool MipsCpuHasFeature(CpuFeature feature);
    295 
    296   // Returns the activation frame alignment constraint, or zero if the
    297   // platform doesn't care. Any non-zero value is a power of two.
    298   static int ActivationFrameAlignment();
    299 
    300   static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
    301 
    302 #if defined(V8_TARGET_ARCH_IA32)
    303   // Copy memory area to disjoint memory area.
    304   static void MemCopy(void* dest, const void* src, size_t size);
    305   // Limit below which the extra overhead of the MemCopy function is likely
    306   // to outweigh the benefits of faster copying.
    307   static const int kMinComplexMemCopy = 64;
    308   typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
    309 
    310 #else  // V8_TARGET_ARCH_IA32
    311   static void MemCopy(void* dest, const void* src, size_t size) {
    312     memcpy(dest, src, size);
    313   }
    314   static const int kMinComplexMemCopy = 256;
    315 #endif  // V8_TARGET_ARCH_IA32
    316 
    317  private:
    318   static const int msPerSecond = 1000;
    319 
    320   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
    321 };
    322 
    323 // Represents and controls an area of reserved memory.
    324 // Control of the reserved memory can be assigned to another VirtualMemory
    325 // object by assignment or copy-constructing. This removes the reserved memory
    326 // from the original object.
    327 class VirtualMemory {
    328  public:
    329   // Empty VirtualMemory object, controlling no reserved memory.
    330   VirtualMemory();
    331 
    332   // Reserves virtual memory of the given size.
    333   explicit VirtualMemory(size_t size);
    334 
    335   // Reserves virtual memory containing an area of the given size that
    336   // is aligned to the given alignment. The aligned area may not start at
    337   // the position returned by address().
    338   VirtualMemory(size_t size, size_t alignment);
    339 
    340   // Releases the reserved memory, if any, controlled by this VirtualMemory
    341   // object.
    342   ~VirtualMemory();
    343 
    344   // Returns whether the memory has been reserved.
    345   bool IsReserved();
    346 
    347   // Initializes or resets an embedded VirtualMemory object.
    348   void Reset();
    349 
    350   // Returns the start address of the reserved memory.
    351   // If the memory was reserved with an alignment, this address is not
    352   // necessarily aligned. The user might need to round it up to a multiple of
    353   // the alignment to get the start of the aligned block.
    354   void* address() {
    355     ASSERT(IsReserved());
    356     return address_;
    357   }
    358 
    359   // Returns the size of the reserved memory. The returned value is only
    360   // meaningful when IsReserved() returns true.
    361   // If the memory was reserved with an alignment, this size may be larger
    362   // than the requested size.
    363   size_t size() { return size_; }
    364 
    365   // Commits real memory. Returns whether the operation succeeded.
    366   bool Commit(void* address, size_t size, bool is_executable);
    367 
    368   // Uncommit real memory.  Returns whether the operation succeeded.
    369   bool Uncommit(void* address, size_t size);
    370 
    371   // Creates a single guard page at the given address.
    372   bool Guard(void* address);
    373 
    374   void Release() {
    375     ASSERT(IsReserved());
    376     // Notice: Order is important here. The VirtualMemory object might live
    377     // inside the allocated region.
    378     void* address = address_;
    379     size_t size = size_;
    380     Reset();
    381     bool result = ReleaseRegion(address, size);
    382     USE(result);
    383     ASSERT(result);
    384   }
    385 
    386   // Assign control of the reserved region to a different VirtualMemory object.
    387   // The old object is no longer functional (IsReserved() returns false).
    388   void TakeControl(VirtualMemory* from) {
    389     ASSERT(!IsReserved());
    390     address_ = from->address_;
    391     size_ = from->size_;
    392     from->Reset();
    393   }
    394 
    395   static void* ReserveRegion(size_t size);
    396 
    397   static bool CommitRegion(void* base, size_t size, bool is_executable);
    398 
    399   static bool UncommitRegion(void* base, size_t size);
    400 
    401   // Must be called with a base pointer that has been returned by ReserveRegion
    402   // and the same size it was reserved with.
    403   static bool ReleaseRegion(void* base, size_t size);
    404 
    405  private:
    406   void* address_;  // Start address of the virtual memory.
    407   size_t size_;  // Size of the virtual memory.
    408 };
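        // Example (illustrative sketch): reserving a 1 MB region and committing one
        // OS page of it. A 4 KB commit granularity is assumed here purely for
        // illustration; real code should consult OS::CommitPageSize().
        //
        //   VirtualMemory reservation(1024 * 1024);
        //   if (reservation.IsReserved()) {
        //     void* base = reservation.address();
        //     if (reservation.Commit(base, 4096, false)) {
        //       // 'base' now points to readable/writable committed memory.
        //       reservation.Uncommit(base, 4096);
        //     }
        //   }  // The destructor releases the reservation.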
    409 
    410 
    411 // ----------------------------------------------------------------------------
    412 // Thread
    413 //
    414 // Thread objects are used for creating and running threads. When the Start()
    415 // method is called, the new thread starts running the Run() method in the new
    416 // thread. The Thread object should not be deallocated before the thread has
    417 // terminated.
    418 
    419 class Thread {
    420  public:
    421   // Opaque data type for thread-local storage keys.
    422   // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
    423   // to ensure that enumeration type has correct value range (see Issue 830 for
    424   // more details).
    425   enum LocalStorageKey {
    426     LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
    427     LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
    428   };
    429 
    430   class Options {
    431    public:
    432     Options() : name_("v8:<unknown>"), stack_size_(0) {}
    433     Options(const char* name, int stack_size = 0)
    434         : name_(name), stack_size_(stack_size) {}
    435 
    436     const char* name() const { return name_; }
    437     int stack_size() const { return stack_size_; }
    438 
    439    private:
    440     const char* name_;
    441     int stack_size_;
    442   };
    443 
    444   // Create new thread.
    445   explicit Thread(const Options& options);
    446   virtual ~Thread();
    447 
    448   // Start new thread by calling the Run() method in the new thread.
    449   void Start();
    450 
    451   // Wait until thread terminates.
    452   void Join();
    453 
    454   inline const char* name() const {
    455     return name_;
    456   }
    457 
    458   // Abstract method for run handler.
    459   virtual void Run() = 0;
    460 
    461   // Thread-local storage.
    462   static LocalStorageKey CreateThreadLocalKey();
    463   static void DeleteThreadLocalKey(LocalStorageKey key);
    464   static void* GetThreadLocal(LocalStorageKey key);
    465   static int GetThreadLocalInt(LocalStorageKey key) {
    466     return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
    467   }
    468   static void SetThreadLocal(LocalStorageKey key, void* value);
    469   static void SetThreadLocalInt(LocalStorageKey key, int value) {
    470     SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
    471   }
    472   static bool HasThreadLocal(LocalStorageKey key) {
    473     return GetThreadLocal(key) != NULL;
    474   }
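          // Example (illustrative sketch): storing a per-thread integer. The key
          // is created once and then shared by all threads.
          //
          //   static Thread::LocalStorageKey key = Thread::CreateThreadLocalKey();
          //   Thread::SetThreadLocalInt(key, 42);
          //   int value = Thread::GetThreadLocalInt(key);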
    475 
    476 #ifdef V8_FAST_TLS_SUPPORTED
    477   static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    478     void* result = reinterpret_cast<void*>(
    479         InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
    480     ASSERT(result == GetThreadLocal(key));
    481     return result;
    482   }
    483 #else
    484   static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    485     return GetThreadLocal(key);
    486   }
    487 #endif
    488 
    489   // A hint to the scheduler to let another thread run.
    490   static void YieldCPU();
    491 
    492 
    493   // The thread name length is limited to 16 based on Linux's implementation of
    494   // prctl().
    495   static const int kMaxThreadNameLength = 16;
    496 
    497   class PlatformData;
    498   PlatformData* data() { return data_; }
    499 
    500  private:
    501   void set_name(const char* name);
    502 
    503   PlatformData* data_;
    504 
    505   char name_[kMaxThreadNameLength];
    506   int stack_size_;
    507 
    508   DISALLOW_COPY_AND_ASSIGN(Thread);
    509 };
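        // Example (illustrative sketch): a minimal Thread subclass. 'WorkerThread'
        // and its body are made up for illustration.
        //
        //   class WorkerThread : public Thread {
        //    public:
        //     WorkerThread() : Thread(Options("v8:Worker")) {}
        //     virtual void Run() {
        //       // This code executes on the newly started thread.
        //     }
        //   };
        //
        //   WorkerThread thread;
        //   thread.Start();
        //   thread.Join();  // Wait for Run() to complete.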
    510 
    511 
    512 // ----------------------------------------------------------------------------
    513 // Mutex
    514 //
    515 // Mutexes are used for serializing access to non-reentrant sections of code.
    516 // Mutex implementations should allow for nested/recursive locking.
    517 
    518 class Mutex {
    519  public:
    520   virtual ~Mutex() {}
    521 
    522   // Locks the given mutex. If the mutex is currently unlocked, it becomes
    523   // locked and owned by the calling thread, and the call returns
    524   // immediately. If the mutex is already locked by another thread, the
    525   // calling thread is suspended until the mutex is unlocked.
    526   virtual int Lock() = 0;
    527 
    528   // Unlocks the given mutex. The mutex is assumed to be locked and owned by
    529   // the calling thread on entrance.
    530   virtual int Unlock() = 0;
    531 
    532   // Tries to lock the given mutex. Returns whether the mutex was
    533   // successfully locked.
    534   virtual bool TryLock() = 0;
    535 };
    536 
    537 struct CreateMutexTrait {
    538   static Mutex* Create() {
    539     return OS::CreateMutex();
    540   }
    541 };
    542 
    543 // POD Mutex initialized lazily (i.e. the first time Pointer() is called).
    544 // Usage:
    545 //   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
    546 //
    547 //   void my_function() {
    548 //     ScopedLock my_lock(my_mutex.Pointer());
    549 //     // Do something.
    550 //   }
    551 //
    552 typedef LazyDynamicInstance<
    553     Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
    554 
    555 #define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
    556 
    557 // ----------------------------------------------------------------------------
    558 // ScopedLock
    559 //
    560 // Stack-allocated ScopedLocks provide block-scoped locking and
    561 // unlocking of a mutex.
    562 class ScopedLock {
    563  public:
    564   explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
    565     ASSERT(mutex_ != NULL);
    566     mutex_->Lock();
    567   }
    568   ~ScopedLock() {
    569     mutex_->Unlock();
    570   }
    571 
    572  private:
    573   Mutex* mutex_;
    574   DISALLOW_COPY_AND_ASSIGN(ScopedLock);
    575 };
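        // Example (illustrative sketch): guarding a critical section with a mutex
        // obtained from the OS factory.
        //
        //   Mutex* mutex = OS::CreateMutex();
        //   {
        //     ScopedLock lock(mutex);
        //     // Critical section; the mutex is released when 'lock' goes out
        //     // of scope.
        //   }
        //   delete mutex;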
    576 
    577 
    578 // ----------------------------------------------------------------------------
    579 // Semaphore
    580 //
    581 // A semaphore object is a synchronization object that maintains a count. The
    582 // count is decremented each time a thread completes a wait for the semaphore
    583 // object and incremented each time a thread signals the semaphore. When the
    584 // count reaches zero, threads waiting for the semaphore block until the
    585 // count becomes non-zero.
    586 
    587 class Semaphore {
    588  public:
    589   virtual ~Semaphore() {}
    590 
    591   // Suspends the calling thread until the semaphore counter is non-zero
    592   // and then decrements the semaphore counter.
    593   virtual void Wait() = 0;
    594 
    595   // Suspends the calling thread until the counter is non-zero or the timeout
    596   // has expired. If the timeout expires, the return value is false and the
    597   // counter is unchanged. Otherwise the semaphore counter is decremented and
    598   // true is returned. The timeout value is specified in microseconds.
    599   virtual bool Wait(int timeout) = 0;
    600 
    601   // Increments the semaphore counter.
    602   virtual void Signal() = 0;
    603 };
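        // Example (illustrative sketch): signalling completion across threads with
        // a semaphore that starts at zero.
        //
        //   Semaphore* done = OS::CreateSemaphore(0);
        //   // Worker thread, when finished:  done->Signal();
        //   // Waiting thread:                done->Wait();  // Blocks until Signal().
        //   delete done;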
    604 
    605 template <int InitialValue>
    606 struct CreateSemaphoreTrait {
    607   static Semaphore* Create() {
    608     return OS::CreateSemaphore(InitialValue);
    609   }
    610 };
    611 
    612 // POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
    613 // Usage:
    614 //   // The following semaphore starts at 0.
    615 //   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
    616 //
    617 //   void my_function() {
    618 //     // Do something with my_semaphore.Pointer().
    619 //   }
    620 //
    621 template <int InitialValue>
    622 struct LazySemaphore {
    623   typedef typename LazyDynamicInstance<
    624       Semaphore, CreateSemaphoreTrait<InitialValue>,
    625       ThreadSafeInitOnceTrait>::type type;
    626 };
    627 
    628 #define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
    629 
    630 
    631 // ----------------------------------------------------------------------------
    632 // Socket
    633 //
    634 
    635 class Socket {
    636  public:
    637   virtual ~Socket() {}
    638 
    639   // Server initialization.
    640   virtual bool Bind(const int port) = 0;
    641   virtual bool Listen(int backlog) const = 0;
    642   virtual Socket* Accept() const = 0;
    643 
    644   // Client initialization.
    645   virtual bool Connect(const char* host, const char* port) = 0;
    646 
    647   // Shutdown socket for both read and write. This causes blocking Send and
    648   // Receive calls to exit. After Shutdown the Socket object cannot be used for
    649   // any communication.
    650   virtual bool Shutdown() = 0;
    651 
    652   // Data transmission.
    653   virtual int Send(const char* data, int len) const = 0;
    654   virtual int Receive(char* data, int len) const = 0;
    655 
    656   // Set the value of the SO_REUSEADDR socket option.
    657   virtual bool SetReuseAddress(bool reuse_address) = 0;
    658 
    659   virtual bool IsValid() const = 0;
    660 
    661   static bool SetUp();
    662   static int LastError();
    663   static uint16_t HToN(uint16_t value);
    664   static uint16_t NToH(uint16_t value);
    665   static uint32_t HToN(uint32_t value);
    666   static uint32_t NToH(uint32_t value);
    667 };
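        // Example (illustrative sketch): a simple client connection. The host, port
        // and error handling are illustrative only; Socket::SetUp() is assumed to
        // have been called during process initialization.
        //
        //   Socket* socket = OS::CreateSocket();
        //   if (socket->IsValid() && socket->Connect("localhost", "5858")) {
        //     socket->Send("hello", 5);
        //     socket->Shutdown();
        //   }
        //   delete socket;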
    668 
    669 
    670 // ----------------------------------------------------------------------------
    671 // Sampler
    672 //
    673 // A sampler periodically samples the state of the VM and optionally
    674 // (if used for profiling) the program counter and stack pointer for
    675 // the thread that created it.
    676 
    677 // TickSample captures the information collected for each sample.
    678 class TickSample {
    679  public:
    680   TickSample()
    681       : state(OTHER),
    682         pc(NULL),
    683         sp(NULL),
    684         fp(NULL),
    685         tos(NULL),
    686         frames_count(0),
    687         has_external_callback(false) {}
    688   StateTag state;  // The state of the VM.
    689   Address pc;      // Instruction pointer.
    690   Address sp;      // Stack pointer.
    691   Address fp;      // Frame pointer.
    692   union {
    693     Address tos;   // Top stack value (*sp).
    694     Address external_callback;
    695   };
    696   static const int kMaxFramesCount = 64;
    697   Address stack[kMaxFramesCount];  // Call stack.
    698   int frames_count : 8;  // Number of captured frames.
    699   bool has_external_callback : 1;
    700 };
    701 
    702 class Sampler {
    703  public:
    704   // Initialize sampler.
    705   Sampler(Isolate* isolate, int interval);
    706   virtual ~Sampler();
    707 
    708   int interval() const { return interval_; }
    709 
    710   // Performs stack sampling.
    711   void SampleStack(TickSample* sample) {
    712     DoSampleStack(sample);
    713     IncSamplesTaken();
    714   }
    715 
    716   // This method is called for each sampling period with the current
    717   // program counter.
    718   virtual void Tick(TickSample* sample) = 0;
    719 
    720   // Start and stop sampler.
    721   void Start();
    722   void Stop();
    723 
    724   // Is the sampler used for profiling?
    725   bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
    726   void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
    727   void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
    728 
    729   // Whether the sampler is running (that is, consumes resources).
    730   bool IsActive() const { return NoBarrier_Load(&active_); }
    731 
    732   Isolate* isolate() { return isolate_; }
    733 
    734   // Used in tests to make sure that stack sampling is performed.
    735   int samples_taken() const { return samples_taken_; }
    736   void ResetSamplesTaken() { samples_taken_ = 0; }
    737 
    738   class PlatformData;
    739   PlatformData* data() { return data_; }
    740 
    741   PlatformData* platform_data() { return data_; }
    742 
    743  protected:
    744   virtual void DoSampleStack(TickSample* sample) = 0;
    745 
    746  private:
    747   void SetActive(bool value) { NoBarrier_Store(&active_, value); }
    748   void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
    749 
    750   Isolate* isolate_;
    751   const int interval_;
    752   Atomic32 profiling_;
    753   Atomic32 active_;
    754   PlatformData* data_;  // Platform specific data.
    755   int samples_taken_;  // Counts stack samples taken.
    756   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
    757 };
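        // Example (illustrative sketch): a Sampler subclass that merely counts
        // ticks. 'CountingSampler' is made up; the sampling interval is assumed
        // to be in milliseconds.
        //
        //   class CountingSampler : public Sampler {
        //    public:
        //     explicit CountingSampler(Isolate* isolate)
        //         : Sampler(isolate, 1), ticks_(0) {}
        //     virtual void Tick(TickSample* sample) { ticks_++; }
        //    protected:
        //     virtual void DoSampleStack(TickSample* sample) {}
        //    private:
        //     int ticks_;
        //   };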
    758 
    759 
    760 } }  // namespace v8::internal
    761 
    762 #endif  // V8_PLATFORM_H_
    763