1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 2 // All Rights Reserved. 3 // 4 // Redistribution and use in source and binary forms, with or without 5 // modification, are permitted provided that the following conditions are 6 // met: 7 // 8 // - Redistributions of source code must retain the above copyright notice, 9 // this list of conditions and the following disclaimer. 10 // 11 // - Redistribution in binary form must reproduce the above copyright 12 // notice, this list of conditions and the following disclaimer in the 13 // documentation and/or other materials provided with the distribution. 14 // 15 // - Neither the name of Sun Microsystems or the names of contributors may 16 // be used to endorse or promote products derived from this software without 17 // specific prior written permission. 18 // 19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 // The original source code covered by the above license above has been 32 // modified significantly by Google Inc. 33 // Copyright 2012 the V8 project authors. All rights reserved. 
34 35 #include "assembler.h" 36 37 #include <cmath> 38 #include "api.h" 39 #include "builtins.h" 40 #include "counters.h" 41 #include "cpu.h" 42 #include "debug.h" 43 #include "deoptimizer.h" 44 #include "execution.h" 45 #include "ic.h" 46 #include "isolate-inl.h" 47 #include "jsregexp.h" 48 #include "lazy-instance.h" 49 #include "platform.h" 50 #include "regexp-macro-assembler.h" 51 #include "regexp-stack.h" 52 #include "runtime.h" 53 #include "serialize.h" 54 #include "store-buffer-inl.h" 55 #include "stub-cache.h" 56 #include "token.h" 57 58 #if V8_TARGET_ARCH_IA32 59 #include "ia32/assembler-ia32-inl.h" 60 #elif V8_TARGET_ARCH_X64 61 #include "x64/assembler-x64-inl.h" 62 #elif V8_TARGET_ARCH_ARM 63 #include "arm/assembler-arm-inl.h" 64 #elif V8_TARGET_ARCH_MIPS 65 #include "mips/assembler-mips-inl.h" 66 #else 67 #error "Unknown architecture." 68 #endif 69 70 // Include native regexp-macro-assembler. 71 #ifndef V8_INTERPRETED_REGEXP 72 #if V8_TARGET_ARCH_IA32 73 #include "ia32/regexp-macro-assembler-ia32.h" 74 #elif V8_TARGET_ARCH_X64 75 #include "x64/regexp-macro-assembler-x64.h" 76 #elif V8_TARGET_ARCH_ARM 77 #include "arm/regexp-macro-assembler-arm.h" 78 #elif V8_TARGET_ARCH_MIPS 79 #include "mips/regexp-macro-assembler-mips.h" 80 #else // Unknown architecture. 81 #error "Unknown architecture." 82 #endif // Target architecture. 83 #endif // V8_INTERPRETED_REGEXP 84 85 namespace v8 { 86 namespace internal { 87 88 // ----------------------------------------------------------------------------- 89 // Common double constants. 
90 91 struct DoubleConstant BASE_EMBEDDED { 92 double min_int; 93 double one_half; 94 double minus_one_half; 95 double minus_zero; 96 double zero; 97 double uint8_max_value; 98 double negative_infinity; 99 double canonical_non_hole_nan; 100 double the_hole_nan; 101 double uint32_bias; 102 }; 103 104 static DoubleConstant double_constants; 105 106 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; 107 108 static bool math_exp_data_initialized = false; 109 static Mutex* math_exp_data_mutex = NULL; 110 static double* math_exp_constants_array = NULL; 111 static double* math_exp_log_table_array = NULL; 112 113 // ----------------------------------------------------------------------------- 114 // Implementation of AssemblerBase 115 116 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) 117 : isolate_(isolate), 118 jit_cookie_(0), 119 enabled_cpu_features_(0), 120 emit_debug_code_(FLAG_debug_code), 121 predictable_code_size_(false) { 122 if (FLAG_mask_constants_with_cookie && isolate != NULL) { 123 jit_cookie_ = isolate->random_number_generator()->NextInt(); 124 } 125 126 if (buffer == NULL) { 127 // Do our own buffer management. 128 if (buffer_size <= kMinimalBufferSize) { 129 buffer_size = kMinimalBufferSize; 130 if (isolate->assembler_spare_buffer() != NULL) { 131 buffer = isolate->assembler_spare_buffer(); 132 isolate->set_assembler_spare_buffer(NULL); 133 } 134 } 135 if (buffer == NULL) buffer = NewArray<byte>(buffer_size); 136 own_buffer_ = true; 137 } else { 138 // Use externally provided buffer instead. 
139 ASSERT(buffer_size > 0); 140 own_buffer_ = false; 141 } 142 buffer_ = static_cast<byte*>(buffer); 143 buffer_size_ = buffer_size; 144 145 pc_ = buffer_; 146 } 147 148 149 AssemblerBase::~AssemblerBase() { 150 if (own_buffer_) { 151 if (isolate() != NULL && 152 isolate()->assembler_spare_buffer() == NULL && 153 buffer_size_ == kMinimalBufferSize) { 154 isolate()->set_assembler_spare_buffer(buffer_); 155 } else { 156 DeleteArray(buffer_); 157 } 158 } 159 } 160 161 162 // ----------------------------------------------------------------------------- 163 // Implementation of PredictableCodeSizeScope 164 165 PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler, 166 int expected_size) 167 : assembler_(assembler), 168 expected_size_(expected_size), 169 start_offset_(assembler->pc_offset()), 170 old_value_(assembler->predictable_code_size()) { 171 assembler_->set_predictable_code_size(true); 172 } 173 174 175 PredictableCodeSizeScope::~PredictableCodeSizeScope() { 176 // TODO(svenpanne) Remove the 'if' when everything works. 177 if (expected_size_ >= 0) { 178 CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_); 179 } 180 assembler_->set_predictable_code_size(old_value_); 181 } 182 183 184 // ----------------------------------------------------------------------------- 185 // Implementation of CpuFeatureScope 186 187 #ifdef DEBUG 188 CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) 189 : assembler_(assembler) { 190 ASSERT(CpuFeatures::IsSafeForSnapshot(f)); 191 old_enabled_ = assembler_->enabled_cpu_features(); 192 uint64_t mask = static_cast<uint64_t>(1) << f; 193 // TODO(svenpanne) This special case below doesn't belong here! 194 #if V8_TARGET_ARCH_ARM 195 // ARMv7 is implied by VFP3. 
196 if (f == VFP3) { 197 mask |= static_cast<uint64_t>(1) << ARMv7; 198 } 199 #endif 200 assembler_->set_enabled_cpu_features(old_enabled_ | mask); 201 } 202 203 204 CpuFeatureScope::~CpuFeatureScope() { 205 assembler_->set_enabled_cpu_features(old_enabled_); 206 } 207 #endif 208 209 210 // ----------------------------------------------------------------------------- 211 // Implementation of PlatformFeatureScope 212 213 PlatformFeatureScope::PlatformFeatureScope(CpuFeature f) 214 : old_cross_compile_(CpuFeatures::cross_compile_) { 215 // CpuFeatures is a global singleton, therefore this is only safe in 216 // single threaded code. 217 ASSERT(Serializer::enabled()); 218 uint64_t mask = static_cast<uint64_t>(1) << f; 219 CpuFeatures::cross_compile_ |= mask; 220 } 221 222 223 PlatformFeatureScope::~PlatformFeatureScope() { 224 CpuFeatures::cross_compile_ = old_cross_compile_; 225 } 226 227 228 // ----------------------------------------------------------------------------- 229 // Implementation of Label 230 231 int Label::pos() const { 232 if (pos_ < 0) return -pos_ - 1; 233 if (pos_ > 0) return pos_ - 1; 234 UNREACHABLE(); 235 return 0; 236 } 237 238 239 // ----------------------------------------------------------------------------- 240 // Implementation of RelocInfoWriter and RelocIterator 241 // 242 // Relocation information is written backwards in memory, from high addresses 243 // towards low addresses, byte by byte. Therefore, in the encodings listed 244 // below, the first byte listed it at the highest address, and successive 245 // bytes in the record are at progressively lower addresses. 246 // 247 // Encoding 248 // 249 // The most common modes are given single-byte encodings. Also, it is 250 // easy to identify the type of reloc info and skip unwanted modes in 251 // an iteration. 252 // 253 // The encoding relies on the fact that there are fewer than 14 254 // different relocation modes using standard non-compact encoding. 
255 // 256 // The first byte of a relocation record has a tag in its low 2 bits: 257 // Here are the record schemes, depending on the low tag and optional higher 258 // tags. 259 // 260 // Low tag: 261 // 00: embedded_object: [6-bit pc delta] 00 262 // 263 // 01: code_target: [6-bit pc delta] 01 264 // 265 // 10: short_data_record: [6-bit pc delta] 10 followed by 266 // [6-bit data delta] [2-bit data type tag] 267 // 268 // 11: long_record [2-bit high tag][4 bit middle_tag] 11 269 // followed by variable data depending on type. 270 // 271 // 2-bit data type tags, used in short_data_record and data_jump long_record: 272 // code_target_with_id: 00 273 // position: 01 274 // statement_position: 10 275 // comment: 11 (not used in short_data_record) 276 // 277 // Long record format: 278 // 4-bit middle_tag: 279 // 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2 280 // (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM, 281 // and is between 0000 and 1100) 282 // The format is: 283 // 00 [4 bit middle_tag] 11 followed by 284 // 00 [6 bit pc delta] 285 // 286 // 1101: constant pool. Used on ARM only for now. 287 // The format is: 11 1101 11 288 // signed int (size of the constant pool). 289 // 1110: long_data_record 290 // The format is: [2-bit data_type_tag] 1110 11 291 // signed intptr_t, lowest byte written first 292 // (except data_type code_target_with_id, which 293 // is followed by a signed int, not intptr_t.) 294 // 295 // 1111: long_pc_jump 296 // The format is: 297 // pc-jump: 00 1111 11, 298 // 00 [6 bits pc delta] 299 // or 300 // pc-jump (variable length): 301 // 01 1111 11, 302 // [7 bits data] 0 303 // ... 304 // [7 bits data] 1 305 // (Bits 6..31 of pc delta, with leading zeroes 306 // dropped, and last non-zero chunk tagged with 1.) 
// Upper bound on the number of modes using the standard non-compact
// encoding (checked against the mode enum in RelocInfoWriter::Write).
const int kMaxStandardNonCompactModes = 14;

// Layout of the per-record tag bits (see the format description above).
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
const int kLocatableTypeTagBits = 2;
const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;

// Low-tag values.
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kLocatableTag = 2;
const int kDefaultTag = 3;

const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;

// A pc delta that fits in the 6 bits next to the low tag needs no jump.
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;

// Variable-length pc-jump chunk encoding: 7 data bits per byte, low bit
// tags the last chunk.
const int kVariableLengthPCJumpTopTag = 1;
const int kChunkBits = 7;
const int kChunkMask = (1 << kChunkBits) - 1;
const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;


const int kDataJumpExtraTag = kPCJumpExtraTag - 1;

// Data-type tags used in short_data_record and data_jump long records.
const int kCodeWithIdTag = 0;
const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;

const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 3;


// Emits a variable-length pc-jump record for the high bits of pc_delta
// (if needed) and returns the low kSmallPCDeltaBits bits, which the
// caller encodes inline in the following record.
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
  // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
  // Otherwise write a variable length PC jump for the bits that do
  // not fit in the kSmallPCDeltaBits bits.
  if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
  WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
  uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
  ASSERT(pc_jump > 0);
  // Write kChunkBits size chunks of the pc_jump.
  for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
    byte b = pc_jump & kChunkMask;
    *--pos_ = b << kLastChunkTagBits;
  }
  // Tag the last chunk so it can be identified.
  *pos_ = *pos_ | kLastChunkTag;
  // Return the remaining kSmallPCDeltaBits of the pc_delta.
  return pc_delta & kSmallPCDeltaMask;
}


// Emits a single-byte record: [6-bit pc delta][2-bit tag].
void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
  // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
  pc_delta = WriteVariableLengthPCJump(pc_delta);
  *--pos_ = pc_delta << kTagBits | tag;
}


// Emits a single byte of [6-bit signed data delta][2-bit data type tag].
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
  *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
}


// Emits a long-record header byte: [2-bit top][4-bit extra][11].
void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
  *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
                             extra_tag << kTagBits |
                             kDefaultTag);
}


// Emits a long-record header followed by a one-byte pc delta.
void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
  // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
  pc_delta = WriteVariableLengthPCJump(pc_delta);
  WriteExtraTag(extra_tag, 0);
  *--pos_ = pc_delta;
}


// Emits a data_jump record carrying a full int, least significant byte
// written last (records are laid out backwards).
void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
  WriteExtraTag(kDataJumpExtraTag, top_tag);
  for (int i = 0; i < kIntSize; i++) {
    *--pos_ = static_cast<byte>(data_delta);
    // Signed right shift is arithmetic shift. Tested in test-utils.cc.
    data_delta = data_delta >> kBitsPerByte;
  }
}


// Emits a constant-pool record carrying the pool size as a full int.
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
  WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
  for (int i = 0; i < kIntSize; i++) {
    *--pos_ = static_cast<byte>(data);
    // Signed right shift is arithmetic shift. Tested in test-utils.cc.
    data = data >> kBitsPerByte;
  }
}


// Emits a data_jump record carrying a full intptr_t payload.
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
  WriteExtraTag(kDataJumpExtraTag, top_tag);
  for (int i = 0; i < kIntptrSize; i++) {
    *--pos_ = static_cast<byte>(data_delta);
    // Signed right shift is arithmetic shift. Tested in test-utils.cc.
    data_delta = data_delta >> kBitsPerByte;
  }
}


// Encodes one RelocInfo entry, delta-encoded against the previously
// written entry (last_pc_, last_id_, last_position_), choosing the most
// compact record form its mode allows.
void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
  byte* begin_pos = pos_;
#endif
  ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
  ASSERT(rinfo->pc() - last_pc_ >= 0);
  ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
         <= kMaxStandardNonCompactModes);
  // Use unsigned delta-encoding for pc.
  uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
  RelocInfo::Mode rmode = rinfo->rmode();

  // The two most common modes are given small tags, and usually fit in a byte.
  if (rmode == RelocInfo::EMBEDDED_OBJECT) {
    WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
  } else if (rmode == RelocInfo::CODE_TARGET) {
    WriteTaggedPC(pc_delta, kCodeTargetTag);
    ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
  } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    // Use signed delta-encoding for id.
    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
    int id_delta = static_cast<int>(rinfo->data()) - last_id_;
    // Check if delta is small enough to fit in a tagged byte.
    if (is_intn(id_delta, kSmallDataBits)) {
      WriteTaggedPC(pc_delta, kLocatableTag);
      WriteTaggedData(id_delta, kCodeWithIdTag);
    } else {
      // Otherwise, use costly encoding.
      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
      WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
    }
    last_id_ = static_cast<int>(rinfo->data());
  } else if (RelocInfo::IsPosition(rmode)) {
    // Use signed delta-encoding for position.
    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
    int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
    int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
                                                      : kStatementPositionTag;
    // Check if delta is small enough to fit in a tagged byte.
    if (is_intn(pos_delta, kSmallDataBits)) {
      WriteTaggedPC(pc_delta, kLocatableTag);
      WriteTaggedData(pos_delta, pos_type_tag);
    } else {
      // Otherwise, use costly encoding.
      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
      WriteExtraTaggedIntData(pos_delta, pos_type_tag);
    }
    last_position_ = static_cast<int>(rinfo->data());
  } else if (RelocInfo::IsComment(rmode)) {
    // Comments are normally not generated, so we use the costly encoding.
    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
    WriteExtraTaggedData(rinfo->data(), kCommentTag);
    ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
  } else if (RelocInfo::IsConstPool(rmode)) {
    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
    WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
  } else {
    ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
    int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
    // For all other modes we simply use the mode as the extra tag.
    // None of these modes need a data component.
    ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
    WriteExtraTaggedPC(pc_delta, saved_mode);
  }
  last_pc_ = rinfo->pc();
#ifdef DEBUG
  ASSERT(begin_pos - pos_ <= kMaxSize);
#endif
}


// The readers below mirror the writers above; pos_ walks backwards
// through the relocation info, one byte at a time.

inline int RelocIterator::AdvanceGetTag() {
  return *--pos_ & kTagMask;
}


inline int RelocIterator::GetExtraTag() {
  return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
}


inline int RelocIterator::GetTopTag() {
  return *pos_ >> (kTagBits + kExtraTagBits);
}


inline void RelocIterator::ReadTaggedPC() {
  rinfo_.pc_ += *pos_ >> kTagBits;
}


inline void RelocIterator::AdvanceReadPC() {
  rinfo_.pc_ += *--pos_;
}


// Reads a full-int id delta and accumulates it into last_id_.
void RelocIterator::AdvanceReadId() {
  int x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
  }
  last_id_ += x;
  rinfo_.data_ = last_id_;
}


// Reads the full-int constant pool size (not delta-encoded).
void RelocIterator::AdvanceReadConstPoolData() {
  int x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
  }
  rinfo_.data_ = x;
}


// Reads a full-int position delta and accumulates it into last_position_.
void RelocIterator::AdvanceReadPosition() {
  int x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
  }
  last_position_ += x;
  rinfo_.data_ = last_position_;
}


// Reads a full intptr_t payload (used for comment records).
void RelocIterator::AdvanceReadData() {
  intptr_t x = 0;
  for (int i = 0; i < kIntptrSize; i++) {
    x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
  }
  rinfo_.data_ = x;
}


void RelocIterator::AdvanceReadVariableLengthPCJump() {
  // Read the 32-kSmallPCDeltaBits most significant bits of the
  // pc jump in kChunkBits bit chunks and shift them into place.
  // Stop when the last chunk is encountered.
  uint32_t pc_jump = 0;
  for (int i = 0; i < kIntSize; i++) {
    byte pc_jump_part = *--pos_;
    pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
    if ((pc_jump_part & kLastChunkTagMask) == 1) break;
  }
  // The least significant kSmallPCDeltaBits bits will be added
  // later.
  rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
}


inline int RelocIterator::GetLocatableTypeTag() {
  return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
}


// Decodes a 6-bit signed id delta from the current byte.
inline void RelocIterator::ReadTaggedId() {
  int8_t signed_b = *pos_;
  // Signed right shift is arithmetic shift. Tested in test-utils.cc.
  last_id_ += signed_b >> kLocatableTypeTagBits;
  rinfo_.data_ = last_id_;
}


// Decodes a 6-bit signed position delta from the current byte.
inline void RelocIterator::ReadTaggedPosition() {
  int8_t signed_b = *pos_;
  // Signed right shift is arithmetic shift. Tested in test-utils.cc.
  last_position_ += signed_b >> kLocatableTypeTagBits;
  rinfo_.data_ = last_position_;
}


// Maps a position data-type tag to the corresponding RelocInfo mode.
static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
  ASSERT(tag == kNonstatementPositionTag ||
         tag == kStatementPositionTag);
  return (tag == kNonstatementPositionTag) ?
         RelocInfo::POSITION :
         RelocInfo::STATEMENT_POSITION;
}


// Advances to the next entry whose mode is selected by mode_mask_,
// updating rinfo_ as it decodes.  Sets done_ when the stream (and the
// optional trailing code-age pseudo-entry) is exhausted.
void RelocIterator::next() {
  ASSERT(!done());
  // Basically, do the opposite of RelocInfoWriter::Write.
  // Reading of data is as far as possible avoided for unwanted modes,
  // but we must always update the pc.
  //
  // We exit this loop by returning when we find a mode we want.
  while (pos_ > end_) {
    int tag = AdvanceGetTag();
    if (tag == kEmbeddedObjectTag) {
      ReadTaggedPC();
      if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
    } else if (tag == kCodeTargetTag) {
      ReadTaggedPC();
      if (SetMode(RelocInfo::CODE_TARGET)) return;
    } else if (tag == kLocatableTag) {
      ReadTaggedPC();
      Advance();
      int locatable_tag = GetLocatableTypeTag();
      if (locatable_tag == kCodeWithIdTag) {
        if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
          ReadTaggedId();
          return;
        }
      } else {
        // Compact encoding is never used for comments,
        // so it must be a position.
        ASSERT(locatable_tag == kNonstatementPositionTag ||
               locatable_tag == kStatementPositionTag);
        if (mode_mask_ & RelocInfo::kPositionMask) {
          ReadTaggedPosition();
          if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
        }
      }
    } else {
      ASSERT(tag == kDefaultTag);
      int extra_tag = GetExtraTag();
      if (extra_tag == kPCJumpExtraTag) {
        if (GetTopTag() == kVariableLengthPCJumpTopTag) {
          AdvanceReadVariableLengthPCJump();
        } else {
          AdvanceReadPC();
        }
      } else if (extra_tag == kDataJumpExtraTag) {
        int locatable_tag = GetTopTag();
        if (locatable_tag == kCodeWithIdTag) {
          if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
            AdvanceReadId();
            return;
          }
          Advance(kIntSize);
        } else if (locatable_tag != kCommentTag) {
          ASSERT(locatable_tag == kNonstatementPositionTag ||
                 locatable_tag == kStatementPositionTag);
          if (mode_mask_ & RelocInfo::kPositionMask) {
            AdvanceReadPosition();
            if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
          } else {
            Advance(kIntSize);
          }
        } else {
          ASSERT(locatable_tag == kCommentTag);
          if (SetMode(RelocInfo::COMMENT)) {
            AdvanceReadData();
            return;
          }
          Advance(kIntptrSize);
        }
      } else if ((extra_tag == kConstPoolExtraTag) &&
                 (GetTopTag() == kConstPoolTag)) {
        if (SetMode(RelocInfo::CONST_POOL)) {
          AdvanceReadConstPoolData();
          return;
        }
        Advance(kIntSize);
      } else {
        AdvanceReadPC();
        int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
        if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
      }
    }
  }
  // After the encoded stream, synthesize one CODE_AGE_SEQUENCE entry if
  // the code object has a (non-young) code-age sequence.
  if (code_age_sequence_ != NULL) {
    byte* old_code_age_sequence = code_age_sequence_;
    code_age_sequence_ = NULL;
    if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
      rinfo_.data_ = 0;
      rinfo_.pc_ = old_code_age_sequence;
      return;
    }
  }
  done_ = true;
}


// Iterates the relocation info of a compiled Code object.
RelocIterator::RelocIterator(Code* code, int mode_mask) {
  rinfo_.host_ = code;
  rinfo_.pc_ = code->instruction_start();
  rinfo_.data_ = 0;
  // Relocation info is read backwards.
  pos_ = code->relocation_start() + code->relocation_size();
  end_ = code->relocation_start();
  done_ = false;
  mode_mask_ = mode_mask;
  last_id_ = 0;
  last_position_ = 0;
  byte* sequence = code->FindCodeAgeSequence();
  if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
    code_age_sequence_ = sequence;
  } else {
    code_age_sequence_ = NULL;
  }
  if (mode_mask_ == 0) pos_ = end_;
  next();
}


// Iterates the relocation info of a not-yet-committed code buffer.
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
  rinfo_.pc_ = desc.buffer;
  rinfo_.data_ = 0;
  // Relocation info is read backwards.
  pos_ = desc.buffer + desc.buffer_size;
  end_ = pos_ - desc.reloc_size;
  done_ = false;
  mode_mask_ = mode_mask;
  last_id_ = 0;
  last_position_ = 0;
  code_age_sequence_ = NULL;
  if (mode_mask_ == 0) pos_ = end_;
  next();
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo


#ifdef DEBUG
// Returns true if the code described by |desc| contains reloc entries that
// would need patching after code generation (used to validate deopt tables).
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
  // Ensure there are no code targets or embedded objects present in the
  // deoptimization entries, they would require relocation after code
  // generation.
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::ModeMask(RelocInfo::CELL) |
                  RelocInfo::kApplyMask;
  RelocIterator it(desc, mode_mask);
  return !it.done();
}
#endif


#ifdef ENABLE_DISASSEMBLER
// Human-readable name for a relocation mode (disassembler output only).
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
  switch (rmode) {
    case RelocInfo::NONE32:
      return "no reloc 32";
    case RelocInfo::NONE64:
      return "no reloc 64";
    case RelocInfo::EMBEDDED_OBJECT:
      return "embedded object";
    case RelocInfo::CONSTRUCT_CALL:
      return "code target (js construct call)";
    case RelocInfo::CODE_TARGET_CONTEXT:
      return "code target (context)";
    case RelocInfo::DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
#endif
      return "debug break";
    case RelocInfo::CODE_TARGET:
      return "code target";
    case RelocInfo::CODE_TARGET_WITH_ID:
      return "code target with id";
    case RelocInfo::CELL:
      return "property cell";
    case RelocInfo::RUNTIME_ENTRY:
      return "runtime entry";
    case RelocInfo::JS_RETURN:
      return "js return";
    case RelocInfo::COMMENT:
      return "comment";
    case RelocInfo::POSITION:
      return "position";
    case RelocInfo::STATEMENT_POSITION:
      return "statement position";
    case RelocInfo::EXTERNAL_REFERENCE:
      return "external reference";
    case RelocInfo::INTERNAL_REFERENCE:
      return "internal reference";
    case RelocInfo::CONST_POOL:
      return "constant pool";
    case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
#endif
      return "debug break slot";
    case RelocInfo::CODE_AGE_SEQUENCE:
      return "code_age_sequence";
    case RelocInfo::NUMBER_OF_MODES:
      UNREACHABLE();
      return "number_of_modes";
  }
  return "unknown relocation type";
}


// Prints this entry's pc, mode name and mode-specific payload to |out|.
void RelocInfo::Print(Isolate* isolate, FILE* out) {
  PrintF(out, "%p  %s", pc_, RelocModeName(rmode_));
  if (IsComment(rmode_)) {
    PrintF(out, "  (%s)", reinterpret_cast<char*>(data_));
  } else if (rmode_ == EMBEDDED_OBJECT) {
    PrintF(out, "  (");
    target_object()->ShortPrint(out);
    PrintF(out, ")");
  } else if (rmode_ == EXTERNAL_REFERENCE) {
    ExternalReferenceEncoder ref_encoder(isolate);
    PrintF(out, " (%s)  (%p)",
           ref_encoder.NameOfAddress(target_reference()),
           target_reference());
  } else if (IsCodeTarget(rmode_)) {
    Code* code = Code::GetCodeFromTargetAddress(target_address());
    PrintF(out, " (%s)  (%p)", Code::Kind2String(code->kind()),
           target_address());
    if (rmode_ == CODE_TARGET_WITH_ID) {
      PrintF(out, " (id=%d)", static_cast<int>(data_));
    }
  } else if (IsPosition(rmode_)) {
    PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
  } else if (IsRuntimeEntry(rmode_) &&
             isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
    int id = Deoptimizer::GetDeoptimizationId(
        isolate, target_address(), Deoptimizer::EAGER);
    if (id != Deoptimizer::kNotDeoptimizationEntry) {
      PrintF(out, "  (deoptimization bailout %d)", id);
    }
  }

  PrintF(out, "\n");
}
#endif  // ENABLE_DISASSEMBLER


#ifdef VERIFY_HEAP
// Heap-verification checks for this entry's target, per mode.
void RelocInfo::Verify() {
  switch (rmode_) {
    case EMBEDDED_OBJECT:
      Object::VerifyPointer(target_object());
      break;
    case CELL:
      Object::VerifyPointer(target_cell());
      break;
    case DEBUG_BREAK:
      // With debugger support enabled, DEBUG_BREAK deliberately falls
      // through to the code-target checks below.
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
      break;
#endif
    case CONSTRUCT_CALL:
    case CODE_TARGET_CONTEXT:
    case CODE_TARGET_WITH_ID:
    case CODE_TARGET: {
      // convert inline target address to code object
      Address addr = target_address();
      CHECK(addr != NULL);
      // Check that we can find the right code object.
      Code* code = Code::GetCodeFromTargetAddress(addr);
      Object* found = code->GetIsolate()->FindCodeObject(addr);
      CHECK(found->IsCode());
      CHECK(code->address() == HeapObject::cast(found)->address());
      break;
    }
    case RUNTIME_ENTRY:
    case JS_RETURN:
    case COMMENT:
    case POSITION:
    case STATEMENT_POSITION:
    case EXTERNAL_REFERENCE:
    case INTERNAL_REFERENCE:
    case CONST_POOL:
    case DEBUG_BREAK_SLOT:
    case NONE32:
    case NONE64:
      break;
    case NUMBER_OF_MODES:
      UNREACHABLE();
      break;
    case CODE_AGE_SEQUENCE:
      ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
      break;
  }
}
#endif  // VERIFY_HEAP


// -----------------------------------------------------------------------------
// Implementation of ExternalReference

// Fills in the double constants referenced from generated code and
// creates the mutex guarding the Math.exp data.  Must run before any
// generated code uses these external references.
void ExternalReference::SetUp() {
  double_constants.min_int = kMinInt;
  double_constants.one_half = 0.5;
  double_constants.minus_one_half = -0.5;
  double_constants.minus_zero = -0.0;
  double_constants.uint8_max_value = 255;
  double_constants.zero = 0.0;
  double_constants.canonical_non_hole_nan = OS::nan_value();
  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
  double_constants.negative_infinity = -V8_INFINITY;
  double_constants.uint32_bias =
    static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;

  math_exp_data_mutex = new Mutex();
}


// Builds the constant and mantissa tables used by the generated fast
// Math.exp code.  Idempotent; guarded by math_exp_data_mutex.
void ExternalReference::InitializeMathExpData() {
  // Early return?
  // NOTE(review): this first read of math_exp_data_initialized happens
  // outside the lock (double-checked locking without an atomic) — verify
  // this is safe on the memory models of all supported targets.
  if (math_exp_data_initialized) return;

  LockGuard<Mutex> lock_guard(math_exp_data_mutex);
  if (!math_exp_data_initialized) {
    // If this is changed, generated code must be adapted too.
    const int kTableSizeBits = 11;
    const int kTableSize = 1 << kTableSizeBits;
    const double kTableSizeDouble = static_cast<double>(kTableSize);

    math_exp_constants_array = new double[9];
    // Input values smaller than this always return 0.
    math_exp_constants_array[0] = -708.39641853226408;
    // Input values larger than this always return +Infinity.
    math_exp_constants_array[1] = 709.78271289338397;
    math_exp_constants_array[2] = V8_INFINITY;
    // The rest is black magic. Do not attempt to understand it. It is
    // loosely based on the "expd" function published at:
    // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
    const double constant3 = (1 << kTableSizeBits) / log(2.0);
    math_exp_constants_array[3] = constant3;
    math_exp_constants_array[4] =
        static_cast<double>(static_cast<int64_t>(3) << 51);
    math_exp_constants_array[5] = 1 / constant3;
    math_exp_constants_array[6] = 3.0000000027955394;
    math_exp_constants_array[7] = 0.16666666685227835;
    math_exp_constants_array[8] = 1;

    // Table of the mantissa bits of 2^(i/kTableSize).
    math_exp_log_table_array = new double[kTableSize];
    for (int i = 0; i < kTableSize; i++) {
      double value = pow(2, i / kTableSizeDouble);
      uint64_t bits = BitCast<uint64_t, double>(value);
      bits &= (static_cast<uint64_t>(1) << 52) - 1;
      double mantissa = BitCast<double, uint64_t>(bits);
      math_exp_log_table_array[i] = mantissa;
    }

    math_exp_data_initialized = true;
  }
}


// Frees the Math.exp tables and their guarding mutex.
void ExternalReference::TearDownMathExpData() {
  delete[] math_exp_constants_array;
  delete[] math_exp_log_table_array;
  delete math_exp_data_mutex;
}


// The constructors below wrap the various kinds of addresses generated
// code may reference.  Redirect() is a no-op except under the simulator
// or serializer, where calls are routed through a redirector.

ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
  : address_(Redirect(isolate, Builtins::c_function_address(id))) {}


ExternalReference::ExternalReference(
    ApiFunction* fun,
    Type type = ExternalReference::BUILTIN_CALL,
    Isolate* isolate = NULL)
  : address_(Redirect(isolate, fun->address(), type)) {}


ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
  : address_(isolate->builtins()->builtin_address(name)) {}


ExternalReference::ExternalReference(Runtime::FunctionId id,
                                     Isolate* isolate)
  : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}


ExternalReference::ExternalReference(const Runtime::Function* f,
                                     Isolate* isolate)
  : address_(Redirect(isolate, f->entry)) {}


ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
  return ExternalReference(isolate);
}


ExternalReference::ExternalReference(const IC_Utility& ic_utility,
                                     Isolate* isolate)
  : address_(Redirect(isolate, ic_utility.address())) {}

#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address,
                                     Isolate* isolate)
  : address_(debug_address.address(isolate)) {}
#endif

ExternalReference::ExternalReference(StatsCounter* counter)
  : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}


ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
  : address_(isolate->get_address_from_id(id)) {}


ExternalReference::ExternalReference(const SCTableReference& table_ref)
  : address_(table_ref.address()) {}


// Factories for addresses of runtime support functions called from
// generated code.

ExternalReference ExternalReference::
    incremental_marking_record_write_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}


ExternalReference ExternalReference::
    incremental_evacuation_record_write_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
}


ExternalReference ExternalReference::
    store_buffer_overflow_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}


ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
}


ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
  return
      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
}
// --- More redirected C++ entry points ----------------------------------------

ExternalReference ExternalReference::delete_handle_scope_extensions(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}


ExternalReference ExternalReference::get_date_field_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}


ExternalReference ExternalReference::get_make_code_young_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}


ExternalReference ExternalReference::get_mark_code_as_executed_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}


// --- References to per-isolate data addresses --------------------------------
// These expose raw addresses of isolate-owned fields so generated code can
// load/store them directly.

ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
  return ExternalReference(isolate->date_cache()->stamp_address());
}


ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
  return ExternalReference(isolate->stress_deopt_count_address());
}


ExternalReference ExternalReference::transcendental_cache_array_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->transcendental_cache()->cache_array_address());
}


ExternalReference ExternalReference::new_deoptimizer_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}


ExternalReference ExternalReference::compute_output_frames_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}


ExternalReference ExternalReference::log_enter_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}


ExternalReference ExternalReference::log_leave_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}


ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
  return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}


ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
    Isolate* isolate) {
  return ExternalReference(
      isolate->keyed_lookup_cache()->field_offsets_address());
}


// --- Heap / stack-guard / regexp-stack addresses -----------------------------

ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->roots_array_start());
}


ExternalReference ExternalReference::allocation_sites_list_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->allocation_sites_list_address());
}


ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}


ExternalReference ExternalReference::address_of_real_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}


ExternalReference ExternalReference::address_of_regexp_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->limit_address());
}


ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceStart());
}


ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
}


// The mask is an integer value smuggled through the Address type so it can
// be embedded as reference data.
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
  return ExternalReference(reinterpret_cast<Address>(
      isolate->heap()->NewSpaceMask()));
}


ExternalReference ExternalReference::new_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::heap_always_allocate_scope_depth(
    Isolate* isolate) {
  Heap* heap = isolate->heap();
  return ExternalReference(heap->always_allocate_scope_depth_address());
}


ExternalReference ExternalReference::new_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldPointerSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldPointerSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::old_data_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldDataSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::old_data_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldDataSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::
    new_space_high_promotion_mode_active_address(Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
}


// --- HandleScope bookkeeping addresses ---------------------------------------

ExternalReference ExternalReference::handle_scope_level_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_level_address(isolate));
}


ExternalReference ExternalReference::handle_scope_next_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_next_address(isolate));
}


ExternalReference ExternalReference::handle_scope_limit_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_limit_address(isolate));
}


// --- Pending exception / message addresses -----------------------------------

ExternalReference ExternalReference::scheduled_exception_address(
    Isolate* isolate) {
  return ExternalReference(isolate->scheduled_exception_address());
}


ExternalReference ExternalReference::address_of_pending_message_obj(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_obj_address());
}


ExternalReference ExternalReference::address_of_has_pending_message(
    Isolate* isolate) {
  return ExternalReference(isolate->has_pending_message_address());
}


ExternalReference ExternalReference::address_of_pending_message_script(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_script_address());
}


// --- Addresses of the process-wide double constants --------------------------
// These point into the file-static double_constants struct initialized above.

ExternalReference ExternalReference::address_of_min_int() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}


ExternalReference ExternalReference::address_of_one_half() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}


ExternalReference ExternalReference::address_of_minus_one_half() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_one_half));
}


ExternalReference ExternalReference::address_of_minus_zero() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_zero));
}


ExternalReference ExternalReference::address_of_zero() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
}


ExternalReference ExternalReference::address_of_uint8_max_value() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint8_max_value));
}


ExternalReference ExternalReference::address_of_negative_infinity() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.negative_infinity));
}


ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
}


ExternalReference ExternalReference::address_of_the_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.the_hole_nan));
}


ExternalReference ExternalReference::address_of_uint32_bias() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint32_bias));
}


#ifndef V8_INTERPRETED_REGEXP
// --- Native regexp support (only when regexps are compiled, not interpreted) -

// Picks the architecture-specific stack-guard check for generated regexp code.
ExternalReference ExternalReference::re_check_stack_guard_state(
    Isolate* isolate) {
  Address function;
#if V8_TARGET_ARCH_X64
  function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
  function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
  function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
  function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
  UNREACHABLE();
#endif
  return ExternalReference(Redirect(isolate, function));
}


ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
}


ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}


ExternalReference ExternalReference::re_word_character_map() {
  return ExternalReference(
      NativeRegExpMacroAssembler::word_character_map_address());
}


ExternalReference ExternalReference::address_of_static_offsets_vector(
    Isolate* isolate) {
  return ExternalReference(
      reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}


ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->regexp_stack()->memory_address());
}


ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->memory_size_address());
}

#endif  // V8_INTERPRETED_REGEXP


// --- C helpers called from generated code for double arithmetic --------------

static double add_two_doubles(double x, double y) {
  return x + y;
}


static double sub_two_doubles(double x, double y) {
  return x - y;
}


static double mul_two_doubles(double x, double y) {
  return x * y;
}


static double div_two_doubles(double x, double y) {
  return x / y;
}


// modulo() is the project's JS-semantics remainder helper (defined elsewhere).
static double mod_two_doubles(double x, double y) {
  return modulo(x, y);
}


// --- Transcendental wrappers (thin shims over libm) --------------------------

static double math_sin_double(double x) {
  return sin(x);
}


static double math_cos_double(double x) {
  return cos(x);
}


static double math_tan_double(double x) {
  return tan(x);
}


static double math_log_double(double x) {
  return log(x);
}


ExternalReference ExternalReference::math_sin_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_sin_double),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_cos_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_cos_double),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_tan_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_tan_double),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_log_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_log_double),
                                    BUILTIN_FP_CALL));
}


// Address of one entry in the math-exp constants array; requires
// InitializeMathExpData() to have run.
ExternalReference ExternalReference::math_exp_constants(int constant_index) {
  ASSERT(math_exp_data_initialized);
  return ExternalReference(
      reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}


ExternalReference ExternalReference::math_exp_log_table() {
  ASSERT(math_exp_data_initialized);
  return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}


// Address of the flags word inside the MemoryChunk header of |page|.
ExternalReference ExternalReference::page_flags(Page* page) {
  return ExternalReference(reinterpret_cast<Address>(page) +
                           MemoryChunk::kFlagsOffset);
}


ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
  return ExternalReference(entry);
}


// Dispatches x^y to the cheapest correct implementation.
// NOTE(review): static_cast<double -> int> is undefined behavior when y is
// outside int range; the y == y_int comparison then guards the result but
// not the cast itself — confirm this is acceptable for the target compilers.
double power_helper(double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
    return power_double_int(x, y_int);  // Returns 1 if exponent is 0.
  }
  if (y == 0.5) {
    return (std::isinf(x)) ? V8_INFINITY
                           : fast_sqrt(x + 0.0);  // Convert -0 to +0.
  }
  if (y == -0.5) {
    return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
1508 } 1509 return power_double_double(x, y); 1510 } 1511 1512 1513 // Helper function to compute x^y, where y is known to be an 1514 // integer. Uses binary decomposition to limit the number of 1515 // multiplications; see the discussion in "Hacker's Delight" by Henry 1516 // S. Warren, Jr., figure 11-6, page 213. 1517 double power_double_int(double x, int y) { 1518 double m = (y < 0) ? 1 / x : x; 1519 unsigned n = (y < 0) ? -y : y; 1520 double p = 1; 1521 while (n != 0) { 1522 if ((n & 1) != 0) p *= m; 1523 m *= m; 1524 if ((n & 2) != 0) p *= m; 1525 m *= m; 1526 n >>= 2; 1527 } 1528 return p; 1529 } 1530 1531 1532 double power_double_double(double x, double y) { 1533 #if defined(__MINGW64_VERSION_MAJOR) && \ 1534 (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1) 1535 // MinGW64 has a custom implementation for pow. This handles certain 1536 // special cases that are different. 1537 if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) { 1538 double f; 1539 if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0; 1540 } 1541 1542 if (x == 2.0) { 1543 int y_int = static_cast<int>(y); 1544 if (y == y_int) return ldexp(1.0, y_int); 1545 } 1546 #endif 1547 1548 // The checks for special cases can be dropped in ia32 because it has already 1549 // been done in generated code before bailing out here. 
1550 if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) { 1551 return OS::nan_value(); 1552 } 1553 return pow(x, y); 1554 } 1555 1556 1557 ExternalReference ExternalReference::power_double_double_function( 1558 Isolate* isolate) { 1559 return ExternalReference(Redirect(isolate, 1560 FUNCTION_ADDR(power_double_double), 1561 BUILTIN_FP_FP_CALL)); 1562 } 1563 1564 1565 ExternalReference ExternalReference::power_double_int_function( 1566 Isolate* isolate) { 1567 return ExternalReference(Redirect(isolate, 1568 FUNCTION_ADDR(power_double_int), 1569 BUILTIN_FP_INT_CALL)); 1570 } 1571 1572 1573 static int native_compare_doubles(double y, double x) { 1574 if (x == y) return EQUAL; 1575 return x < y ? LESS : GREATER; 1576 } 1577 1578 1579 bool EvalComparison(Token::Value op, double op1, double op2) { 1580 ASSERT(Token::IsCompareOp(op)); 1581 switch (op) { 1582 case Token::EQ: 1583 case Token::EQ_STRICT: return (op1 == op2); 1584 case Token::NE: return (op1 != op2); 1585 case Token::LT: return (op1 < op2); 1586 case Token::GT: return (op1 > op2); 1587 case Token::LTE: return (op1 <= op2); 1588 case Token::GTE: return (op1 >= op2); 1589 default: 1590 UNREACHABLE(); 1591 return false; 1592 } 1593 } 1594 1595 1596 ExternalReference ExternalReference::double_fp_operation( 1597 Token::Value operation, Isolate* isolate) { 1598 typedef double BinaryFPOperation(double x, double y); 1599 BinaryFPOperation* function = NULL; 1600 switch (operation) { 1601 case Token::ADD: 1602 function = &add_two_doubles; 1603 break; 1604 case Token::SUB: 1605 function = &sub_two_doubles; 1606 break; 1607 case Token::MUL: 1608 function = &mul_two_doubles; 1609 break; 1610 case Token::DIV: 1611 function = &div_two_doubles; 1612 break; 1613 case Token::MOD: 1614 function = &mod_two_doubles; 1615 break; 1616 default: 1617 UNREACHABLE(); 1618 } 1619 return ExternalReference(Redirect(isolate, 1620 FUNCTION_ADDR(function), 1621 BUILTIN_FP_FP_CALL)); 1622 } 1623 1624 1625 ExternalReference 
ExternalReference::compare_doubles(Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(native_compare_doubles),
                                    BUILTIN_COMPARE_CALL));
}


#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
}


ExternalReference ExternalReference::debug_step_in_fp_address(
    Isolate* isolate) {
  return ExternalReference(isolate->debug()->step_in_fp_addr());
}
#endif


// Records a source position for the code currently being assembled. The
// position is only buffered in state_ here; it is emitted as reloc info
// later by WriteRecordedPositions().
void PositionsRecorder::RecordPosition(int pos) {
  ASSERT(pos != RelocInfo::kNoPosition);
  ASSERT(pos >= 0);
  state_.current_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (gdbjit_lineinfo_ != NULL) {
    // false: this is a plain position, not a statement position.
    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
  }
#endif
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddPositionEvent(jit_handler_data_,
                                                 assembler_->pc_offset(),
                                                 pos));
}


// Like RecordPosition, but for statement boundaries (tracked separately so
// the debugger can distinguish them).
void PositionsRecorder::RecordStatementPosition(int pos) {
  ASSERT(pos != RelocInfo::kNoPosition);
  ASSERT(pos >= 0);
  state_.current_statement_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (gdbjit_lineinfo_ != NULL) {
    // true: mark this position as a statement position.
    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
  }
#endif
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddStatementPositionEvent(
                     jit_handler_data_,
                     assembler_->pc_offset(),
                     pos));
}


// Flushes any buffered positions to the assembler's reloc info stream,
// de-duplicating against what was last written. Returns true iff at least
// one reloc entry was emitted.
bool PositionsRecorder::WriteRecordedPositions() {
  bool written = false;

  // Write the statement position if it is different from what was written last
  // time.
  if (state_.current_statement_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
                                state_.current_statement_position);
    state_.written_statement_position = state_.current_statement_position;
    written = true;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (state_.current_position != state_.written_position &&
      state_.current_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
    state_.written_position = state_.current_position;
    written = true;
  }

  // Return whether something was written.
  return written;
}

} }  // namespace v8::internal