// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for MacOS goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
30 31 #include <unistd.h> 32 #include <sys/mman.h> 33 #include <mach/mach_init.h> 34 #include <mach-o/dyld.h> 35 #include <mach-o/getsect.h> 36 37 #include <AvailabilityMacros.h> 38 39 #include <pthread.h> 40 #include <semaphore.h> 41 #include <signal.h> 42 #include <mach/mach.h> 43 #include <mach/semaphore.h> 44 #include <mach/task.h> 45 #include <mach/vm_statistics.h> 46 #include <sys/time.h> 47 #include <sys/resource.h> 48 #include <sys/types.h> 49 #include <stdarg.h> 50 #include <stdlib.h> 51 52 #include <errno.h> 53 54 #undef MAP_TYPE 55 56 #include "v8.h" 57 58 #include "platform.h" 59 60 // Manually define these here as weak imports, rather than including execinfo.h. 61 // This lets us launch on 10.4 which does not have these calls. 62 extern "C" { 63 extern int backtrace(void**, int) __attribute__((weak_import)); 64 extern char** backtrace_symbols(void* const*, int) 65 __attribute__((weak_import)); 66 extern void backtrace_symbols_fd(void* const*, int, int) 67 __attribute__((weak_import)); 68 } 69 70 71 namespace v8 { 72 namespace internal { 73 74 // 0 is never a valid thread id on MacOSX since a ptread_t is 75 // a pointer. 76 static const pthread_t kNoThread = (pthread_t) 0; 77 78 79 double ceiling(double x) { 80 // Correct Mac OS X Leopard 'ceil' behavior. 81 if (-1.0 < x && x < 0.0) { 82 return -0.0; 83 } else { 84 return ceil(x); 85 } 86 } 87 88 89 void OS::Setup() { 90 // Seed the random number generator. 91 // Convert the current time to a 64-bit integer first, before converting it 92 // to an unsigned. Going directly will cause an overflow and the seed to be 93 // set to all ones. The seed will be identical for different instances that 94 // call this setup code within the same millisecond. 
95 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); 96 srandom(static_cast<unsigned int>(seed)); 97 } 98 99 100 // We keep the lowest and highest addresses mapped as a quick way of 101 // determining that pointers are outside the heap (used mostly in assertions 102 // and verification). The estimate is conservative, ie, not all addresses in 103 // 'allocated' space are actually allocated to our heap. The range is 104 // [lowest, highest), inclusive on the low and and exclusive on the high end. 105 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); 106 static void* highest_ever_allocated = reinterpret_cast<void*>(0); 107 108 109 static void UpdateAllocatedSpaceLimits(void* address, int size) { 110 lowest_ever_allocated = Min(lowest_ever_allocated, address); 111 highest_ever_allocated = 112 Max(highest_ever_allocated, 113 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); 114 } 115 116 117 bool OS::IsOutsideAllocatedSpace(void* address) { 118 return address < lowest_ever_allocated || address >= highest_ever_allocated; 119 } 120 121 122 size_t OS::AllocateAlignment() { 123 return getpagesize(); 124 } 125 126 127 // Constants used for mmap. 128 // kMmapFd is used to pass vm_alloc flags to tag the region with the user 129 // defined tag 255 This helps identify V8-allocated regions in memory analysis 130 // tools like vmmap(1). 131 static const int kMmapFd = VM_MAKE_TAG(255); 132 static const off_t kMmapFdOffset = 0; 133 134 135 void* OS::Allocate(const size_t requested, 136 size_t* allocated, 137 bool is_executable) { 138 const size_t msize = RoundUp(requested, getpagesize()); 139 int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); 140 void* mbase = mmap(NULL, msize, prot, 141 MAP_PRIVATE | MAP_ANON, 142 kMmapFd, kMmapFdOffset); 143 if (mbase == MAP_FAILED) { 144 LOG(StringEvent("OS::Allocate", "mmap failed")); 145 return NULL; 146 } 147 *allocated = msize; 148 UpdateAllocatedSpaceLimits(mbase, msize); 149 return mbase; 150 } 151 152 153 void OS::Free(void* address, const size_t size) { 154 // TODO(1240712): munmap has a return value which is ignored here. 155 int result = munmap(address, size); 156 USE(result); 157 ASSERT(result == 0); 158 } 159 160 161 #ifdef ENABLE_HEAP_PROTECTION 162 163 void OS::Protect(void* address, size_t size) { 164 UNIMPLEMENTED(); 165 } 166 167 168 void OS::Unprotect(void* address, size_t size, bool is_executable) { 169 UNIMPLEMENTED(); 170 } 171 172 #endif 173 174 175 void OS::Sleep(int milliseconds) { 176 usleep(1000 * milliseconds); 177 } 178 179 180 void OS::Abort() { 181 // Redirect to std abort to signal abnormal program termination 182 abort(); 183 } 184 185 186 void OS::DebugBreak() { 187 asm("int $3"); 188 } 189 190 191 class PosixMemoryMappedFile : public OS::MemoryMappedFile { 192 public: 193 PosixMemoryMappedFile(FILE* file, void* memory, int size) 194 : file_(file), memory_(memory), size_(size) { } 195 virtual ~PosixMemoryMappedFile(); 196 virtual void* memory() { return memory_; } 197 private: 198 FILE* file_; 199 void* memory_; 200 int size_; 201 }; 202 203 204 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, 205 void* initial) { 206 FILE* file = fopen(name, "w+"); 207 if (file == NULL) return NULL; 208 fwrite(initial, size, 1, file); 209 void* memory = 210 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); 211 return new PosixMemoryMappedFile(file, memory, size); 212 } 213 214 215 PosixMemoryMappedFile::~PosixMemoryMappedFile() { 216 if (memory_) munmap(memory_, size_); 217 fclose(file_); 218 } 219 220 221 void OS::LogSharedLibraryAddresses() { 222 #ifdef ENABLE_LOGGING_AND_PROFILING 223 
unsigned int images_count = _dyld_image_count(); 224 for (unsigned int i = 0; i < images_count; ++i) { 225 const mach_header* header = _dyld_get_image_header(i); 226 if (header == NULL) continue; 227 #if V8_HOST_ARCH_X64 228 uint64_t size; 229 char* code_ptr = getsectdatafromheader_64( 230 reinterpret_cast<const mach_header_64*>(header), 231 SEG_TEXT, 232 SECT_TEXT, 233 &size); 234 #else 235 unsigned int size; 236 char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); 237 #endif 238 if (code_ptr == NULL) continue; 239 const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); 240 const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; 241 LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); 242 } 243 #endif // ENABLE_LOGGING_AND_PROFILING 244 } 245 246 247 uint64_t OS::CpuFeaturesImpliedByPlatform() { 248 // MacOSX requires all these to install so we can assume they are present. 249 // These constants are defined by the CPUid instructions. 250 const uint64_t one = 1; 251 return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID); 252 } 253 254 255 int OS::ActivationFrameAlignment() { 256 // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI 257 // Function Call Guide". 258 return 16; 259 } 260 261 262 const char* OS::LocalTimezone(double time) { 263 if (isnan(time)) return ""; 264 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); 265 struct tm* t = localtime(&tv); 266 if (NULL == t) return ""; 267 return t->tm_zone; 268 } 269 270 271 double OS::LocalTimeOffset() { 272 time_t tv = time(NULL); 273 struct tm* t = localtime(&tv); 274 // tm_gmtoff includes any daylight savings offset, so subtract it. 275 return static_cast<double>(t->tm_gmtoff * msPerSecond - 276 (t->tm_isdst > 0 ? 
3600 * msPerSecond : 0)); 277 } 278 279 280 int OS::StackWalk(Vector<StackFrame> frames) { 281 #ifdef ANDROID 282 // For some reason the weak linkage doesn't work when building mksnapshot 283 // for android on macos. Just bail out as if we're on 10.4. We don't need 284 // stack walking for mksnapshot. 285 return 0; 286 #else 287 // If weak link to execinfo lib has failed, ie because we are on 10.4, abort. 288 if (backtrace == NULL) 289 return 0; 290 291 int frames_size = frames.length(); 292 void** addresses = NewArray<void*>(frames_size); 293 int frames_count = backtrace(addresses, frames_size); 294 295 char** symbols; 296 symbols = backtrace_symbols(addresses, frames_count); 297 if (symbols == NULL) { 298 DeleteArray(addresses); 299 return kStackWalkError; 300 } 301 302 for (int i = 0; i < frames_count; i++) { 303 frames[i].address = addresses[i]; 304 // Format a text representation of the frame based on the information 305 // available. 306 SNPrintF(MutableCStrVector(frames[i].text, 307 kStackWalkMaxTextLen), 308 "%s", 309 symbols[i]); 310 // Make sure line termination is in place. 311 frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; 312 } 313 314 DeleteArray(addresses); 315 free(symbols); 316 317 return frames_count; 318 #endif // ANDROID 319 } 320 321 322 323 324 VirtualMemory::VirtualMemory(size_t size) { 325 address_ = mmap(NULL, size, PROT_NONE, 326 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, 327 kMmapFd, kMmapFdOffset); 328 size_ = size; 329 } 330 331 332 VirtualMemory::~VirtualMemory() { 333 if (IsReserved()) { 334 if (0 == munmap(address(), size())) address_ = MAP_FAILED; 335 } 336 } 337 338 339 bool VirtualMemory::IsReserved() { 340 return address_ != MAP_FAILED; 341 } 342 343 344 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { 345 int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); 346 if (MAP_FAILED == mmap(address, size, prot, 347 MAP_PRIVATE | MAP_ANON | MAP_FIXED, 348 kMmapFd, kMmapFdOffset)) { 349 return false; 350 } 351 352 UpdateAllocatedSpaceLimits(address, size); 353 return true; 354 } 355 356 357 bool VirtualMemory::Uncommit(void* address, size_t size) { 358 return mmap(address, size, PROT_NONE, 359 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, 360 kMmapFd, kMmapFdOffset) != MAP_FAILED; 361 } 362 363 364 class ThreadHandle::PlatformData : public Malloced { 365 public: 366 explicit PlatformData(ThreadHandle::Kind kind) { 367 Initialize(kind); 368 } 369 370 void Initialize(ThreadHandle::Kind kind) { 371 switch (kind) { 372 case ThreadHandle::SELF: thread_ = pthread_self(); break; 373 case ThreadHandle::INVALID: thread_ = kNoThread; break; 374 } 375 } 376 pthread_t thread_; // Thread handle for pthread. 377 }; 378 379 380 381 ThreadHandle::ThreadHandle(Kind kind) { 382 data_ = new PlatformData(kind); 383 } 384 385 386 void ThreadHandle::Initialize(ThreadHandle::Kind kind) { 387 data_->Initialize(kind); 388 } 389 390 391 ThreadHandle::~ThreadHandle() { 392 delete data_; 393 } 394 395 396 bool ThreadHandle::IsSelf() const { 397 return pthread_equal(data_->thread_, pthread_self()); 398 } 399 400 401 bool ThreadHandle::IsValid() const { 402 return data_->thread_ != kNoThread; 403 } 404 405 406 Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) { 407 } 408 409 410 Thread::~Thread() { 411 } 412 413 414 static void* ThreadEntry(void* arg) { 415 Thread* thread = reinterpret_cast<Thread*>(arg); 416 // This is also initialized by the first argument to pthread_create() but we 417 // don't know which thread will run first (the original thread or the new 418 // one) so we initialize it here too. 
419 thread->thread_handle_data()->thread_ = pthread_self(); 420 ASSERT(thread->IsValid()); 421 thread->Run(); 422 return NULL; 423 } 424 425 426 void Thread::Start() { 427 pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this); 428 } 429 430 431 void Thread::Join() { 432 pthread_join(thread_handle_data()->thread_, NULL); 433 } 434 435 436 Thread::LocalStorageKey Thread::CreateThreadLocalKey() { 437 pthread_key_t key; 438 int result = pthread_key_create(&key, NULL); 439 USE(result); 440 ASSERT(result == 0); 441 return static_cast<LocalStorageKey>(key); 442 } 443 444 445 void Thread::DeleteThreadLocalKey(LocalStorageKey key) { 446 pthread_key_t pthread_key = static_cast<pthread_key_t>(key); 447 int result = pthread_key_delete(pthread_key); 448 USE(result); 449 ASSERT(result == 0); 450 } 451 452 453 void* Thread::GetThreadLocal(LocalStorageKey key) { 454 pthread_key_t pthread_key = static_cast<pthread_key_t>(key); 455 return pthread_getspecific(pthread_key); 456 } 457 458 459 void Thread::SetThreadLocal(LocalStorageKey key, void* value) { 460 pthread_key_t pthread_key = static_cast<pthread_key_t>(key); 461 pthread_setspecific(pthread_key, value); 462 } 463 464 465 void Thread::YieldCPU() { 466 sched_yield(); 467 } 468 469 470 class MacOSMutex : public Mutex { 471 public: 472 473 MacOSMutex() { 474 pthread_mutexattr_t attr; 475 pthread_mutexattr_init(&attr); 476 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); 477 pthread_mutex_init(&mutex_, &attr); 478 } 479 480 ~MacOSMutex() { pthread_mutex_destroy(&mutex_); } 481 482 int Lock() { return pthread_mutex_lock(&mutex_); } 483 484 int Unlock() { return pthread_mutex_unlock(&mutex_); } 485 486 private: 487 pthread_mutex_t mutex_; 488 }; 489 490 491 Mutex* OS::CreateMutex() { 492 return new MacOSMutex(); 493 } 494 495 496 class MacOSSemaphore : public Semaphore { 497 public: 498 explicit MacOSSemaphore(int count) { 499 semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count); 500 } 
501 502 ~MacOSSemaphore() { 503 semaphore_destroy(mach_task_self(), semaphore_); 504 } 505 506 // The MacOS mach semaphore documentation claims it does not have spurious 507 // wakeups, the way pthreads semaphores do. So the code from the linux 508 // platform is not needed here. 509 void Wait() { semaphore_wait(semaphore_); } 510 511 bool Wait(int timeout); 512 513 void Signal() { semaphore_signal(semaphore_); } 514 515 private: 516 semaphore_t semaphore_; 517 }; 518 519 520 bool MacOSSemaphore::Wait(int timeout) { 521 mach_timespec_t ts; 522 ts.tv_sec = timeout / 1000000; 523 ts.tv_nsec = (timeout % 1000000) * 1000; 524 return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT; 525 } 526 527 528 Semaphore* OS::CreateSemaphore(int count) { 529 return new MacOSSemaphore(count); 530 } 531 532 533 #ifdef ENABLE_LOGGING_AND_PROFILING 534 535 class Sampler::PlatformData : public Malloced { 536 public: 537 explicit PlatformData(Sampler* sampler) 538 : sampler_(sampler), 539 task_self_(mach_task_self()), 540 profiled_thread_(0), 541 sampler_thread_(0) { 542 } 543 544 Sampler* sampler_; 545 // Note: for profiled_thread_ Mach primitives are used instead of PThread's 546 // because the latter doesn't provide thread manipulation primitives required. 547 // For details, consult "Mac OS X Internals" book, Section 7.3. 548 mach_port_t task_self_; 549 thread_act_t profiled_thread_; 550 pthread_t sampler_thread_; 551 552 // Sampler thread handler. 553 void Runner() { 554 // Loop until the sampler is disengaged. 555 while (sampler_->IsActive()) { 556 TickSample sample; 557 558 // If profiling, we record the pc and sp of the profiled thread. 
559 if (sampler_->IsProfiling() 560 && KERN_SUCCESS == thread_suspend(profiled_thread_)) { 561 #if V8_HOST_ARCH_X64 562 thread_state_flavor_t flavor = x86_THREAD_STATE64; 563 x86_thread_state64_t state; 564 mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; 565 #if __DARWIN_UNIX03 566 #define REGISTER_FIELD(name) __r ## name 567 #else 568 #define REGISTER_FIELD(name) r ## name 569 #endif // __DARWIN_UNIX03 570 #elif V8_HOST_ARCH_IA32 571 thread_state_flavor_t flavor = i386_THREAD_STATE; 572 i386_thread_state_t state; 573 mach_msg_type_number_t count = i386_THREAD_STATE_COUNT; 574 #if __DARWIN_UNIX03 575 #define REGISTER_FIELD(name) __e ## name 576 #else 577 #define REGISTER_FIELD(name) e ## name 578 #endif // __DARWIN_UNIX03 579 #else 580 #error Unsupported Mac OS X host architecture. 581 #endif // V8_HOST_ARCH 582 583 if (thread_get_state(profiled_thread_, 584 flavor, 585 reinterpret_cast<natural_t*>(&state), 586 &count) == KERN_SUCCESS) { 587 sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip)); 588 sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp)); 589 sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp)); 590 sampler_->SampleStack(&sample); 591 } 592 thread_resume(profiled_thread_); 593 } 594 595 // We always sample the VM state. 596 sample.state = Logger::state(); 597 // Invoke tick handler with program counter and stack pointer. 598 sampler_->Tick(&sample); 599 600 // Wait until next sampling. 601 usleep(sampler_->interval_ * 1000); 602 } 603 } 604 }; 605 606 #undef REGISTER_FIELD 607 608 609 // Entry point for sampler thread. 
610 static void* SamplerEntry(void* arg) { 611 Sampler::PlatformData* data = 612 reinterpret_cast<Sampler::PlatformData*>(arg); 613 data->Runner(); 614 return 0; 615 } 616 617 618 Sampler::Sampler(int interval, bool profiling) 619 : interval_(interval), profiling_(profiling), active_(false) { 620 data_ = new PlatformData(this); 621 } 622 623 624 Sampler::~Sampler() { 625 delete data_; 626 } 627 628 629 void Sampler::Start() { 630 // If we are profiling, we need to be able to access the calling 631 // thread. 632 if (IsProfiling()) { 633 data_->profiled_thread_ = mach_thread_self(); 634 } 635 636 // Create sampler thread with high priority. 637 // According to POSIX spec, when SCHED_FIFO policy is used, a thread 638 // runs until it exits or blocks. 639 pthread_attr_t sched_attr; 640 sched_param fifo_param; 641 pthread_attr_init(&sched_attr); 642 pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED); 643 pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO); 644 fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO); 645 pthread_attr_setschedparam(&sched_attr, &fifo_param); 646 647 active_ = true; 648 pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_); 649 } 650 651 652 void Sampler::Stop() { 653 // Seting active to false triggers termination of the sampler 654 // thread. 655 active_ = false; 656 657 // Wait for sampler thread to terminate. 658 pthread_join(data_->sampler_thread_, NULL); 659 660 // Deallocate Mach port for thread. 661 if (IsProfiling()) { 662 mach_port_deallocate(data_->task_self_, data_->profiled_thread_); 663 } 664 } 665 666 #endif // ENABLE_LOGGING_AND_PROFILING 667 668 } } // namespace v8::internal 669