// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for FreeBSD goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <stdlib.h>

#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <sys/fcntl.h>  // open
#include <unistd.h>     // getpagesize
// If you don't have execinfo.h then you need devel/libexecinfo from ports.
#include <execinfo.h>   // backtrace, backtrace_symbols
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>
#include <limits.h>

#undef MAP_TYPE

#include "v8.h"
#include "v8threads.h"

#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"


namespace v8 {
namespace internal {


// Protects updates of the allocated-space limits tracked below.
// Created in OS::SetUp() and destroyed in OS::TearDown().
static Mutex* limit_mutex = NULL;


// Returns the abbreviated name of the time zone in effect at the given
// time (in milliseconds since the epoch), or "" if the time is NaN or
// cannot be converted to a local time.
const char* OS::LocalTimezone(double time) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  // tm_zone is a BSD extension of struct tm (e.g. "EST").
  return t->tm_zone;
}


// Returns the offset of local *standard* time from UTC in milliseconds.
double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses
// in 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
// Extremes of the address range ever handed out via OS::Allocate() and
// VirtualMemory::CommitRegion().  Initialized to an empty (inverted) range.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Widens the tracked range to include [address, address + size).
// Must only be called after OS::SetUp() has created limit_mutex.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


// Conservative check: returns true only when the address is certainly
// outside every region this process allocated through the functions above.
bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


// Allocates |requested| bytes rounded up to the page size as committed
// read/write (optionally executable) memory.  Stores the actual size in
// *allocated and returns the base address, or returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


// Prints a stack trace of the current thread using libexecinfo.
void OS::DumpBacktrace() {
  POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
}


// Memory-mapped file backed by mmap(); owns both the FILE* and the mapping.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


// Maps an existing file read/write (shared), sized to its current length.
// Returns NULL if the file cannot be opened.
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  // NOTE(review): neither ftell() nor mmap() failure is checked here; on
  // failure |memory_| would hold -1/MAP_FAILED — confirm callers tolerate it.
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


// Creates (truncates) a file of |size| bytes filled from |initial| and maps
// it read/write (shared).  Returns NULL if the file cannot be created or
// the initial contents cannot be written.
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  // NOTE(review): mmap() failure is not checked here either.
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


// Parses a hexadecimal address string (e.g. "0x0804a000") into an unsigned.
static unsigned StringToLong(char* buffer) {
  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
}


// Parses /proc/self/maps and logs a SharedLibraryEvent for every executable
// mapping that has a file path.  Each line is expected to start with
// "<8 hex digits>-<8 hex digits>" followed by the permission flags.
void OS::LogSharedLibraryAddresses() {
  static const int MAP_LENGTH = 1024;
  int fd = open("/proc/self/maps", O_RDONLY);
  if (fd < 0) return;
  while (true) {
    char addr_buffer[11];
    addr_buffer[0] = '0';
    addr_buffer[1] = 'x';
    addr_buffer[10] = 0;
    // Start address: exactly 8 hex digits.
    int result = read(fd, addr_buffer + 2, 8);
    if (result < 8) break;
    unsigned start = StringToLong(addr_buffer);
    // Separator between start and end addresses.
    result = read(fd, addr_buffer + 2, 1);
    if (result < 1) break;
    if (addr_buffer[2] != '-') break;
    // End address: exactly 8 hex digits.
    result = read(fd, addr_buffer + 2, 8);
    if (result < 8) break;
    unsigned end = StringToLong(addr_buffer);
    // Read the rest of the line (permissions, offset, path) into buffer,
    // truncating at MAP_LENGTH - 1 characters.
    char buffer[MAP_LENGTH];
    int bytes_read = -1;
    do {
      bytes_read++;
      if (bytes_read >= MAP_LENGTH - 1)
        break;
      result = read(fd, buffer + bytes_read, 1);
      if (result < 1) break;
    } while (buffer[bytes_read] != '\n');
    buffer[bytes_read] = 0;
    // Ignore mappings that are not executable ('x' flag in " r-xp ...").
    if (buffer[3] != 'x') continue;
    char* start_of_path = index(buffer, '/');
    // There may be no filename in this line. Skip to next.
    if (start_of_path == NULL) continue;
    buffer[bytes_read] = 0;
    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
  }
  close(fd);
}


// No code-moving GC signal needed on this platform.
void OS::SignalCodeMovingGC() {
}


// Captures the current stack into |frames| using libexecinfo's backtrace.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


// Reserves a region of |size| bytes whose base is aligned to |alignment|.
// Over-reserves by |alignment|, then unmaps the misaligned prefix and the
// surplus suffix so exactly the aligned block remains reserved.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


// Forgets the reservation without unmapping it (ownership transferred).
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


// Makes one page at |address| inaccessible to act as a guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


// Reserves (but does not commit) |size| bytes of address space.
// PROT_NONE + MAP_NORESERVE: no access and no swap space charged.
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


// Commits a previously reserved range by remapping it (MAP_FIXED) with
// read/write (and optionally execute) permissions.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}


// Returns a committed range to the reserved (inaccessible) state by
// remapping it with PROT_NONE over the same addresses.
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}


// Counting semaphore backed by POSIX sem_t (process-private).
class FreeBSDSemaphore : public Semaphore {
 public:
  explicit FreeBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


// Blocks until the semaphore can be decremented, retrying on EINTR.
void FreeBSDSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


// Like Wait(), but gives up after |timeout| microseconds.  Returns true if
// the semaphore was acquired, false on timeout (or if the current time
// cannot be read).
bool FreeBSDSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  // sem_timedwait() takes an absolute deadline as a timespec.
  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


Semaphore* OS::CreateSemaphore(int count) {
  return new FreeBSDSemaphore(count);
}


// One-time process initialization for the OS abstraction layer.
void OS::SetUp() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly can cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}


void OS::TearDown() {
  delete limit_mutex;
}


} }  // namespace v8::internal