// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
// compatible parts the implementation is in platform-posix.cc.
30 31 #include <pthread.h> 32 #include <semaphore.h> 33 #include <signal.h> 34 #include <sys/time.h> 35 #include <sys/resource.h> 36 #include <sys/syscall.h> 37 #include <sys/types.h> 38 #include <stdlib.h> 39 40 #include <sys/types.h> // mmap & munmap 41 #include <sys/mman.h> // mmap & munmap 42 #include <sys/stat.h> // open 43 #include <fcntl.h> // open 44 #include <unistd.h> // sysconf 45 #include <execinfo.h> // backtrace, backtrace_symbols 46 #include <strings.h> // index 47 #include <errno.h> 48 #include <stdarg.h> 49 50 #undef MAP_TYPE 51 52 #include "v8.h" 53 54 #include "platform-posix.h" 55 #include "platform.h" 56 #include "v8threads.h" 57 #include "vm-state-inl.h" 58 59 60 namespace v8 { 61 namespace internal { 62 63 64 static Mutex* limit_mutex = NULL; 65 66 67 const char* OS::LocalTimezone(double time) { 68 if (std::isnan(time)) return ""; 69 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); 70 struct tm* t = localtime(&tv); 71 if (NULL == t) return ""; 72 return t->tm_zone; 73 } 74 75 76 double OS::LocalTimeOffset() { 77 time_t tv = time(NULL); 78 struct tm* t = localtime(&tv); 79 // tm_gmtoff includes any daylight savings offset, so subtract it. 80 return static_cast<double>(t->tm_gmtoff * msPerSecond - 81 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); 82 } 83 84 85 // We keep the lowest and highest addresses mapped as a quick way of 86 // determining that pointers are outside the heap (used mostly in assertions 87 // and verification). The estimate is conservative, i.e., not all addresses in 88 // 'allocated' space are actually allocated to our heap. The range is 89 // [lowest, highest), inclusive on the low and and exclusive on the high end. 
90 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); 91 static void* highest_ever_allocated = reinterpret_cast<void*>(0); 92 93 94 static void UpdateAllocatedSpaceLimits(void* address, int size) { 95 ASSERT(limit_mutex != NULL); 96 ScopedLock lock(limit_mutex); 97 98 lowest_ever_allocated = Min(lowest_ever_allocated, address); 99 highest_ever_allocated = 100 Max(highest_ever_allocated, 101 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); 102 } 103 104 105 bool OS::IsOutsideAllocatedSpace(void* address) { 106 return address < lowest_ever_allocated || address >= highest_ever_allocated; 107 } 108 109 110 void* OS::Allocate(const size_t requested, 111 size_t* allocated, 112 bool is_executable) { 113 const size_t msize = RoundUp(requested, AllocateAlignment()); 114 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); 115 void* addr = OS::GetRandomMmapAddr(); 116 void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); 117 if (mbase == MAP_FAILED) { 118 LOG(i::Isolate::Current(), 119 StringEvent("OS::Allocate", "mmap failed")); 120 return NULL; 121 } 122 *allocated = msize; 123 UpdateAllocatedSpaceLimits(mbase, msize); 124 return mbase; 125 } 126 127 128 void OS::DumpBacktrace() { 129 // Currently unsupported. 
130 } 131 132 133 class PosixMemoryMappedFile : public OS::MemoryMappedFile { 134 public: 135 PosixMemoryMappedFile(FILE* file, void* memory, int size) 136 : file_(file), memory_(memory), size_(size) { } 137 virtual ~PosixMemoryMappedFile(); 138 virtual void* memory() { return memory_; } 139 virtual int size() { return size_; } 140 private: 141 FILE* file_; 142 void* memory_; 143 int size_; 144 }; 145 146 147 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { 148 FILE* file = fopen(name, "r+"); 149 if (file == NULL) return NULL; 150 151 fseek(file, 0, SEEK_END); 152 int size = ftell(file); 153 154 void* memory = 155 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); 156 return new PosixMemoryMappedFile(file, memory, size); 157 } 158 159 160 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, 161 void* initial) { 162 FILE* file = fopen(name, "w+"); 163 if (file == NULL) return NULL; 164 int result = fwrite(initial, size, 1, file); 165 if (result < 1) { 166 fclose(file); 167 return NULL; 168 } 169 void* memory = 170 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); 171 return new PosixMemoryMappedFile(file, memory, size); 172 } 173 174 175 PosixMemoryMappedFile::~PosixMemoryMappedFile() { 176 if (memory_) OS::Free(memory_, size_); 177 fclose(file_); 178 } 179 180 181 void OS::LogSharedLibraryAddresses() { 182 // This function assumes that the layout of the file is as follows: 183 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] 184 // If we encounter an unexpected situation we abort scanning further entries. 185 FILE* fp = fopen("/proc/self/maps", "r"); 186 if (fp == NULL) return; 187 188 // Allocate enough room to be able to store a full file name. 189 const int kLibNameLen = FILENAME_MAX + 1; 190 char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); 191 192 i::Isolate* isolate = ISOLATE; 193 // This loop will terminate once the scanning hits an EOF. 
194 while (true) { 195 uintptr_t start, end; 196 char attr_r, attr_w, attr_x, attr_p; 197 // Parse the addresses and permission bits at the beginning of the line. 198 if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; 199 if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; 200 201 int c; 202 if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { 203 // Found a read-only executable entry. Skip characters until we reach 204 // the beginning of the filename or the end of the line. 205 do { 206 c = getc(fp); 207 } while ((c != EOF) && (c != '\n') && (c != '/')); 208 if (c == EOF) break; // EOF: Was unexpected, just exit. 209 210 // Process the filename if found. 211 if (c == '/') { 212 ungetc(c, fp); // Push the '/' back into the stream to be read below. 213 214 // Read to the end of the line. Exit if the read fails. 215 if (fgets(lib_name, kLibNameLen, fp) == NULL) break; 216 217 // Drop the newline character read by fgets. We do not need to check 218 // for a zero-length string because we know that we at least read the 219 // '/' character. 220 lib_name[strlen(lib_name) - 1] = '\0'; 221 } else { 222 // No library name found, just record the raw address range. 223 snprintf(lib_name, kLibNameLen, 224 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); 225 } 226 LOG(isolate, SharedLibraryEvent(lib_name, start, end)); 227 } else { 228 // Entry not describing executable data. Skip to end of line to set up 229 // reading the next entry. 230 do { 231 c = getc(fp); 232 } while ((c != EOF) && (c != '\n')); 233 if (c == EOF) break; 234 } 235 } 236 free(lib_name); 237 fclose(fp); 238 } 239 240 241 void OS::SignalCodeMovingGC() { 242 // Support for ll_prof.py. 243 // 244 // The Linux profiler built into the kernel logs all mmap's with 245 // PROT_EXEC so that analysis tools can properly attribute ticks. We 246 // do a mmap with a name known by ll_prof.py and immediately munmap 247 // it. 
This injects a GC marker into the stream of events generated 248 // by the kernel and allows us to synchronize V8 code log and the 249 // kernel log. 250 int size = sysconf(_SC_PAGESIZE); 251 FILE* f = fopen(FLAG_gc_fake_mmap, "w+"); 252 if (f == NULL) { 253 OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap); 254 OS::Abort(); 255 } 256 void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, 257 fileno(f), 0); 258 ASSERT(addr != MAP_FAILED); 259 OS::Free(addr, size); 260 fclose(f); 261 } 262 263 264 int OS::StackWalk(Vector<OS::StackFrame> frames) { 265 // backtrace is a glibc extension. 266 int frames_size = frames.length(); 267 ScopedVector<void*> addresses(frames_size); 268 269 int frames_count = backtrace(addresses.start(), frames_size); 270 271 char** symbols = backtrace_symbols(addresses.start(), frames_count); 272 if (symbols == NULL) { 273 return kStackWalkError; 274 } 275 276 for (int i = 0; i < frames_count; i++) { 277 frames[i].address = addresses[i]; 278 // Format a text representation of the frame based on the information 279 // available. 280 SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen), 281 "%s", 282 symbols[i]); 283 // Make sure line termination is in place. 284 frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; 285 } 286 287 free(symbols); 288 289 return frames_count; 290 } 291 292 293 // Constants used for mmap. 
294 static const int kMmapFd = -1; 295 static const int kMmapFdOffset = 0; 296 297 298 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } 299 300 301 VirtualMemory::VirtualMemory(size_t size) 302 : address_(ReserveRegion(size)), size_(size) { } 303 304 305 VirtualMemory::VirtualMemory(size_t size, size_t alignment) 306 : address_(NULL), size_(0) { 307 ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); 308 size_t request_size = RoundUp(size + alignment, 309 static_cast<intptr_t>(OS::AllocateAlignment())); 310 void* reservation = mmap(OS::GetRandomMmapAddr(), 311 request_size, 312 PROT_NONE, 313 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, 314 kMmapFd, 315 kMmapFdOffset); 316 if (reservation == MAP_FAILED) return; 317 318 Address base = static_cast<Address>(reservation); 319 Address aligned_base = RoundUp(base, alignment); 320 ASSERT_LE(base, aligned_base); 321 322 // Unmap extra memory reserved before and after the desired block. 323 if (aligned_base != base) { 324 size_t prefix_size = static_cast<size_t>(aligned_base - base); 325 OS::Free(base, prefix_size); 326 request_size -= prefix_size; 327 } 328 329 size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); 330 ASSERT_LE(aligned_size, request_size); 331 332 if (aligned_size != request_size) { 333 size_t suffix_size = request_size - aligned_size; 334 OS::Free(aligned_base + aligned_size, suffix_size); 335 request_size -= suffix_size; 336 } 337 338 ASSERT(aligned_size == request_size); 339 340 address_ = static_cast<void*>(aligned_base); 341 size_ = aligned_size; 342 } 343 344 345 VirtualMemory::~VirtualMemory() { 346 if (IsReserved()) { 347 bool result = ReleaseRegion(address(), size()); 348 ASSERT(result); 349 USE(result); 350 } 351 } 352 353 354 bool VirtualMemory::IsReserved() { 355 return address_ != NULL; 356 } 357 358 359 void VirtualMemory::Reset() { 360 address_ = NULL; 361 size_ = 0; 362 } 363 364 365 bool VirtualMemory::Commit(void* address, size_t size, bool 
is_executable) { 366 return CommitRegion(address, size, is_executable); 367 } 368 369 370 bool VirtualMemory::Uncommit(void* address, size_t size) { 371 return UncommitRegion(address, size); 372 } 373 374 375 bool VirtualMemory::Guard(void* address) { 376 OS::Guard(address, OS::CommitPageSize()); 377 return true; 378 } 379 380 381 void* VirtualMemory::ReserveRegion(size_t size) { 382 void* result = mmap(OS::GetRandomMmapAddr(), 383 size, 384 PROT_NONE, 385 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, 386 kMmapFd, 387 kMmapFdOffset); 388 389 if (result == MAP_FAILED) return NULL; 390 391 return result; 392 } 393 394 395 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { 396 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); 397 if (MAP_FAILED == mmap(base, 398 size, 399 prot, 400 MAP_PRIVATE | MAP_ANON | MAP_FIXED, 401 kMmapFd, 402 kMmapFdOffset)) { 403 return false; 404 } 405 406 UpdateAllocatedSpaceLimits(base, size); 407 return true; 408 } 409 410 411 bool VirtualMemory::UncommitRegion(void* base, size_t size) { 412 return mmap(base, 413 size, 414 PROT_NONE, 415 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, 416 kMmapFd, 417 kMmapFdOffset) != MAP_FAILED; 418 } 419 420 421 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { 422 return munmap(base, size) == 0; 423 } 424 425 426 bool VirtualMemory::HasLazyCommits() { 427 // TODO(alph): implement for the platform. 428 return false; 429 } 430 431 432 class OpenBSDSemaphore : public Semaphore { 433 public: 434 explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); } 435 virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); } 436 437 virtual void Wait(); 438 virtual bool Wait(int timeout); 439 virtual void Signal() { sem_post(&sem_); } 440 private: 441 sem_t sem_; 442 }; 443 444 445 void OpenBSDSemaphore::Wait() { 446 while (true) { 447 int result = sem_wait(&sem_); 448 if (result == 0) return; // Successfully got semaphore. 
449 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. 450 } 451 } 452 453 454 #ifndef TIMEVAL_TO_TIMESPEC 455 #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \ 456 (ts)->tv_sec = (tv)->tv_sec; \ 457 (ts)->tv_nsec = (tv)->tv_usec * 1000; \ 458 } while (false) 459 #endif 460 461 462 bool OpenBSDSemaphore::Wait(int timeout) { 463 const long kOneSecondMicros = 1000000; // NOLINT 464 465 // Split timeout into second and nanosecond parts. 466 struct timeval delta; 467 delta.tv_usec = timeout % kOneSecondMicros; 468 delta.tv_sec = timeout / kOneSecondMicros; 469 470 struct timeval current_time; 471 // Get the current time. 472 if (gettimeofday(¤t_time, NULL) == -1) { 473 return false; 474 } 475 476 // Calculate time for end of timeout. 477 struct timeval end_time; 478 timeradd(¤t_time, &delta, &end_time); 479 480 struct timespec ts; 481 TIMEVAL_TO_TIMESPEC(&end_time, &ts); 482 483 int to = ts.tv_sec; 484 485 while (true) { 486 int result = sem_trywait(&sem_); 487 if (result == 0) return true; // Successfully got semaphore. 488 if (!to) return false; // Timeout. 489 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. 490 usleep(ts.tv_nsec / 1000); 491 to--; 492 } 493 } 494 495 496 Semaphore* OS::CreateSemaphore(int count) { 497 return new OpenBSDSemaphore(count); 498 } 499 500 501 void OS::SetUp() { 502 // Seed the random number generator. We preserve microsecond resolution. 503 uint64_t seed = Ticks() ^ (getpid() << 16); 504 srandom(static_cast<unsigned int>(seed)); 505 limit_mutex = CreateMutex(); 506 } 507 508 509 void OS::TearDown() { 510 delete limit_mutex; 511 } 512 513 514 } } // namespace v8::internal 515