1 // Copyright 2012 the V8 project authors. All rights reserved. 2 // Redistribution and use in source and binary forms, with or without 3 // modification, are permitted provided that the following conditions are 4 // met: 5 // 6 // * Redistributions of source code must retain the above copyright 7 // notice, this list of conditions and the following disclaimer. 8 // * Redistributions in binary form must reproduce the above 9 // copyright notice, this list of conditions and the following 10 // disclaimer in the documentation and/or other materials provided 11 // with the distribution. 12 // * Neither the name of Google Inc. nor the names of its 13 // contributors may be used to endorse or promote products derived 14 // from this software without specific prior written permission. 15 // 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28 #ifndef V8_X64_MACRO_ASSEMBLER_X64_H_ 29 #define V8_X64_MACRO_ASSEMBLER_X64_H_ 30 31 #include "assembler.h" 32 #include "frames.h" 33 #include "v8globals.h" 34 35 namespace v8 { 36 namespace internal { 37 38 // Default scratch register used by MacroAssembler (and other code that needs 39 // a spare register). 
The register isn't callee save, and not used by the 40 // function calling convention. 41 const Register kScratchRegister = { 10 }; // r10. 42 const Register kSmiConstantRegister = { 12 }; // r12 (callee save). 43 const Register kRootRegister = { 13 }; // r13 (callee save). 44 // Value of smi in kSmiConstantRegister. 45 const int kSmiConstantRegisterValue = 1; 46 // Actual value of root register is offset from the root array's start 47 // to take advantage of negative 8-bit displacement values. 48 const int kRootRegisterBias = 128; 49 50 // Convenience for platform-independent signatures. 51 typedef Operand MemOperand; 52 53 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; 54 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; 55 56 bool AreAliased(Register r1, Register r2, Register r3, Register r4); 57 58 // Forward declaration. 59 class JumpTarget; 60 61 struct SmiIndex { 62 SmiIndex(Register index_register, ScaleFactor scale) 63 : reg(index_register), 64 scale(scale) {} 65 Register reg; 66 ScaleFactor scale; 67 }; 68 69 70 // MacroAssembler implements a collection of frequently used macros. 71 class MacroAssembler: public Assembler { 72 public: 73 // The isolate parameter can be NULL if the macro assembler should 74 // not use isolate-dependent functionality. In this case, it's the 75 // responsibility of the caller to never invoke such functions on the 76 // macro assembler. 77 MacroAssembler(Isolate* isolate, void* buffer, int size); 78 79 // Prevent the use of the RootArray during the lifetime of this 80 // scope object.
81 class NoRootArrayScope BASE_EMBEDDED { 82 public: 83 explicit NoRootArrayScope(MacroAssembler* assembler) 84 : variable_(&assembler->root_array_available_), 85 old_value_(assembler->root_array_available_) { 86 assembler->root_array_available_ = false; 87 } 88 ~NoRootArrayScope() { 89 *variable_ = old_value_; 90 } 91 private: 92 bool* variable_; 93 bool old_value_; 94 }; 95 96 // Operand pointing to an external reference. 97 // May emit code to set up the scratch register. The operand is 98 // only guaranteed to be correct as long as the scratch register 99 // isn't changed. 100 // If the operand is used more than once, use a scratch register 101 // that is guaranteed not to be clobbered. 102 Operand ExternalOperand(ExternalReference reference, 103 Register scratch = kScratchRegister); 104 // Loads and stores the value of an external reference. 105 // Special case code for load and store to take advantage of 106 // load_rax/store_rax if possible/necessary. 107 // For other operations, just use: 108 // Operand operand = ExternalOperand(extref); 109 // operation(operand, ..); 110 void Load(Register destination, ExternalReference source); 111 void Store(ExternalReference destination, Register source); 112 // Loads the address of the external reference into the destination 113 // register. 114 void LoadAddress(Register destination, ExternalReference source); 115 // Returns the size of the code generated by LoadAddress. 116 // Used by CallSize(ExternalReference) to find the size of a call. 117 int LoadAddressSize(ExternalReference source); 118 // Pushes the address of the external reference onto the stack. 119 void PushAddress(ExternalReference source); 120 121 // Operations on roots in the root-array. 122 void LoadRoot(Register destination, Heap::RootListIndex index); 123 void StoreRoot(Register source, Heap::RootListIndex index); 124 // Load a root value where the index (or part of it) is variable. 
125 // The variable_offset register is added to the fixed_offset value 126 // to get the index into the root-array. 127 void LoadRootIndexed(Register destination, 128 Register variable_offset, 129 int fixed_offset); 130 void CompareRoot(Register with, Heap::RootListIndex index); 131 void CompareRoot(const Operand& with, Heap::RootListIndex index); 132 void PushRoot(Heap::RootListIndex index); 133 134 // These functions do not arrange the registers in any particular order so 135 // they are not useful for calls that can cause a GC. The caller can 136 // exclude up to 3 registers that do not need to be saved and restored. 137 void PushCallerSaved(SaveFPRegsMode fp_mode, 138 Register exclusion1 = no_reg, 139 Register exclusion2 = no_reg, 140 Register exclusion3 = no_reg); 141 void PopCallerSaved(SaveFPRegsMode fp_mode, 142 Register exclusion1 = no_reg, 143 Register exclusion2 = no_reg, 144 Register exclusion3 = no_reg); 145 146 // --------------------------------------------------------------------------- 147 // GC Support 148 149 150 enum RememberedSetFinalAction { 151 kReturnAtEnd, 152 kFallThroughAtEnd 153 }; 154 155 // Record in the remembered set the fact that we have a pointer to new space 156 // at the address pointed to by the addr register. Only works if addr is not 157 // in new space. 158 void RememberedSetHelper(Register object, // Used for debug code. 159 Register addr, 160 Register scratch, 161 SaveFPRegsMode save_fp, 162 RememberedSetFinalAction and_then); 163 164 void CheckPageFlag(Register object, 165 Register scratch, 166 int mask, 167 Condition cc, 168 Label* condition_met, 169 Label::Distance condition_met_distance = Label::kFar); 170 171 void CheckMapDeprecated(Handle<Map> map, 172 Register scratch, 173 Label* if_deprecated); 174 175 // Check if object is in new space. Jumps if the object is not in new space. 176 // The register scratch can be object itself, but scratch will be clobbered. 
void JumpIfNotInNewSpace(Register object, 178 Register scratch, 179 Label* branch, 180 Label::Distance distance = Label::kFar) { 181 InNewSpace(object, scratch, not_equal, branch, distance); 182 } 183 184 // Check if object is in new space. Jumps if the object is in new space. 185 // The register scratch can be object itself, but it will be clobbered. 186 void JumpIfInNewSpace(Register object, 187 Register scratch, 188 Label* branch, 189 Label::Distance distance = Label::kFar) { 190 InNewSpace(object, scratch, equal, branch, distance); 191 } 192 193 // Check if an object has the black incremental marking color. Also uses rcx! 194 void JumpIfBlack(Register object, 195 Register scratch0, 196 Register scratch1, 197 Label* on_black, 198 Label::Distance on_black_distance = Label::kFar); 199 200 // Detects conservatively whether an object is data-only, i.e. it does not 201 // need to be scanned by the garbage collector. 202 void JumpIfDataObject(Register value, 203 Register scratch, 204 Label* not_data_object, 205 Label::Distance not_data_object_distance); 206 207 // Checks the color of an object. If the object is already grey or black 208 // then we just fall through, since it is already live. If it is white and 209 // we can determine that it doesn't need to be scanned, then we just mark it 210 // black and fall through. For the rest we jump to the label so the 211 // incremental marker can fix its assumptions. 212 void EnsureNotWhite(Register object, 213 Register scratch1, 214 Register scratch2, 215 Label* object_is_white_and_not_data, 216 Label::Distance distance); 217 218 // Notify the garbage collector that we wrote a pointer into an object. 219 // |object| is the object being stored into, |value| is the object being 220 // stored. value and scratch registers are clobbered by the operation. 221 // The offset is the offset from the start of the object, not the offset from 222 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
223 void RecordWriteField( 224 Register object, 225 int offset, 226 Register value, 227 Register scratch, 228 SaveFPRegsMode save_fp, 229 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 230 SmiCheck smi_check = INLINE_SMI_CHECK); 231 232 // As above, but the offset has the tag presubtracted. For use with 233 // Operand(reg, off). 234 void RecordWriteContextSlot( 235 Register context, 236 int offset, 237 Register value, 238 Register scratch, 239 SaveFPRegsMode save_fp, 240 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 241 SmiCheck smi_check = INLINE_SMI_CHECK) { 242 RecordWriteField(context, 243 offset + kHeapObjectTag, 244 value, 245 scratch, 246 save_fp, 247 remembered_set_action, 248 smi_check); 249 } 250 251 // Notify the garbage collector that we wrote a pointer into a fixed array. 252 // |array| is the array being stored into, |value| is the 253 // object being stored. |index| is the array index represented as a non-smi. 254 // All registers are clobbered by the operation RecordWriteArray 255 // filters out smis so it does not update the write barrier if the 256 // value is a smi. 257 void RecordWriteArray( 258 Register array, 259 Register value, 260 Register index, 261 SaveFPRegsMode save_fp, 262 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 263 SmiCheck smi_check = INLINE_SMI_CHECK); 264 265 // For page containing |object| mark region covering |address| 266 // dirty. |object| is the object being stored into, |value| is the 267 // object being stored. The address and value registers are clobbered by the 268 // operation. RecordWrite filters out smis so it does not update 269 // the write barrier if the value is a smi. 
270 void RecordWrite( 271 Register object, 272 Register address, 273 Register value, 274 SaveFPRegsMode save_fp, 275 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 276 SmiCheck smi_check = INLINE_SMI_CHECK); 277 278 #ifdef ENABLE_DEBUGGER_SUPPORT 279 // --------------------------------------------------------------------------- 280 // Debugger Support 281 282 void DebugBreak(); 283 #endif 284 285 // Enter specific kind of exit frame; either in normal or 286 // debug mode. Expects the number of arguments in register rax and 287 // sets up the number of arguments in register rdi and the pointer 288 // to the first argument in register rsi. 289 // 290 // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack 291 // accessible via StackSpaceOperand. 292 void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false); 293 294 // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize 295 // memory (not GCed) on the stack accessible via StackSpaceOperand. 296 void EnterApiExitFrame(int arg_stack_space); 297 298 // Leave the current exit frame. Expects/provides the return value in 299 // register rax:rdx (untouched) and the pointer to the first 300 // argument in register rsi. 301 void LeaveExitFrame(bool save_doubles = false); 302 303 // Leave the current exit frame. Expects/provides the return value in 304 // register rax (untouched). 305 void LeaveApiExitFrame(); 306 307 // Push and pop the registers that can hold pointers. 308 void PushSafepointRegisters() { Pushad(); } 309 void PopSafepointRegisters() { Popad(); } 310 // Store the value in register src in the safepoint register stack 311 // slot for register dst. 
312 void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm); 313 void StoreToSafepointRegisterSlot(Register dst, Register src); 314 void LoadFromSafepointRegisterSlot(Register dst, Register src); 315 316 void InitializeRootRegister() { 317 ExternalReference roots_array_start = 318 ExternalReference::roots_array_start(isolate()); 319 movq(kRootRegister, roots_array_start); 320 addq(kRootRegister, Immediate(kRootRegisterBias)); 321 } 322 323 // --------------------------------------------------------------------------- 324 // JavaScript invokes 325 326 // Set up call kind marking in rcx. The method takes rcx as an 327 // explicit first parameter to make the code more readable at the 328 // call sites. 329 void SetCallKind(Register dst, CallKind kind); 330 331 // Invoke the JavaScript function code by either calling or jumping. 332 void InvokeCode(Register code, 333 const ParameterCount& expected, 334 const ParameterCount& actual, 335 InvokeFlag flag, 336 const CallWrapper& call_wrapper, 337 CallKind call_kind); 338 339 void InvokeCode(Handle<Code> code, 340 const ParameterCount& expected, 341 const ParameterCount& actual, 342 RelocInfo::Mode rmode, 343 InvokeFlag flag, 344 const CallWrapper& call_wrapper, 345 CallKind call_kind); 346 347 // Invoke the JavaScript function in the given register. Changes the 348 // current context to the context in the function before invoking. 349 void InvokeFunction(Register function, 350 const ParameterCount& actual, 351 InvokeFlag flag, 352 const CallWrapper& call_wrapper, 353 CallKind call_kind); 354 355 void InvokeFunction(Handle<JSFunction> function, 356 const ParameterCount& expected, 357 const ParameterCount& actual, 358 InvokeFlag flag, 359 const CallWrapper& call_wrapper, 360 CallKind call_kind); 361 362 // Invoke specified builtin JavaScript function. Adds an entry to 363 // the unresolved list if the name does not resolve. 
364 void InvokeBuiltin(Builtins::JavaScript id, 365 InvokeFlag flag, 366 const CallWrapper& call_wrapper = NullCallWrapper()); 367 368 // Store the function for the given builtin in the target register. 369 void GetBuiltinFunction(Register target, Builtins::JavaScript id); 370 371 // Store the code object for the given builtin in the target register. 372 void GetBuiltinEntry(Register target, Builtins::JavaScript id); 373 374 375 // --------------------------------------------------------------------------- 376 // Smi tagging, untagging and operations on tagged smis. 377 378 void InitializeSmiConstantRegister() { 379 movq(kSmiConstantRegister, 380 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), 381 RelocInfo::NONE64); 382 } 383 384 // Conversions between tagged smi values and non-tagged integer values. 385 386 // Tag an integer value. The result must be known to be a valid smi value. 387 // Only uses the low 32 bits of the src register. Sets the N and Z flags 388 // based on the value of the resulting smi. 389 void Integer32ToSmi(Register dst, Register src); 390 391 // Stores an integer32 value into a memory field that already holds a smi. 392 void Integer32ToSmiField(const Operand& dst, Register src); 393 394 // Adds constant to src and tags the result as a smi. 395 // Result must be a valid smi. 396 void Integer64PlusConstantToSmi(Register dst, Register src, int constant); 397 398 // Convert smi to 32-bit integer. I.e., not sign extended into 399 // high 32 bits of destination. 400 void SmiToInteger32(Register dst, Register src); 401 void SmiToInteger32(Register dst, const Operand& src); 402 403 // Convert smi to 64-bit integer (sign extended if necessary). 404 void SmiToInteger64(Register dst, Register src); 405 void SmiToInteger64(Register dst, const Operand& src); 406 407 // Multiply a positive smi's integer value by a power of two. 408 // Provides result as 64-bit integer value. 
409 void PositiveSmiTimesPowerOfTwoToInteger64(Register dst, 410 Register src, 411 int power); 412 413 // Divide a positive smi's integer value by a power of two. 414 // Provides result as 32-bit integer value. 415 void PositiveSmiDivPowerOfTwoToInteger32(Register dst, 416 Register src, 417 int power); 418 419 // Perform the logical or of two smi values and return a smi value. 420 // If either argument is not a smi, jump to on_not_smis and retain 421 // the original values of source registers. The destination register 422 // may be changed if it's not one of the source registers. 423 void SmiOrIfSmis(Register dst, 424 Register src1, 425 Register src2, 426 Label* on_not_smis, 427 Label::Distance near_jump = Label::kFar); 428 429 430 // Simple comparison of smis. Both sides must be known smis to use these, 431 // otherwise use Cmp. 432 void SmiCompare(Register smi1, Register smi2); 433 void SmiCompare(Register dst, Smi* src); 434 void SmiCompare(Register dst, const Operand& src); 435 void SmiCompare(const Operand& dst, Register src); 436 void SmiCompare(const Operand& dst, Smi* src); 437 // Compare the int32 in src register to the value of the smi stored at dst. 438 void SmiCompareInteger32(const Operand& dst, Register src); 439 // Sets sign and zero flags depending on value of smi in register. 440 void SmiTest(Register src); 441 442 // Functions performing a check on a known or potential smi. Returns 443 // a condition that is satisfied if the check is successful. 444 445 // Is the value a tagged smi. 446 Condition CheckSmi(Register src); 447 Condition CheckSmi(const Operand& src); 448 449 // Is the value a non-negative tagged smi. 450 Condition CheckNonNegativeSmi(Register src); 451 452 // Are both values tagged smis. 453 Condition CheckBothSmi(Register first, Register second); 454 455 // Are both values non-negative tagged smis. 456 Condition CheckBothNonNegativeSmi(Register first, Register second); 457 458 // Are either value a tagged smi. 
459 Condition CheckEitherSmi(Register first, 460 Register second, 461 Register scratch = kScratchRegister); 462 463 // Is the value the minimum smi value (since we are using 464 // two's complement numbers, negating the value is known to yield 465 // a non-smi value). 466 Condition CheckIsMinSmi(Register src); 467 468 // Checks whether a 32-bit integer value is valid for conversion 469 // to a smi. 470 Condition CheckInteger32ValidSmiValue(Register src); 471 472 // Checks whether a 32-bit unsigned integer value is valid for 473 // conversion to a smi. 474 Condition CheckUInteger32ValidSmiValue(Register src); 475 476 // Check whether src is a Smi, and set dst to zero if it is a smi, 477 // and to one if it isn't. 478 void CheckSmiToIndicator(Register dst, Register src); 479 void CheckSmiToIndicator(Register dst, const Operand& src); 480 481 // Test-and-jump functions. Typically combines a check function 482 // above with a conditional jump. 483 484 // Jump if the value cannot be represented by a smi. 485 void JumpIfNotValidSmiValue(Register src, Label* on_invalid, 486 Label::Distance near_jump = Label::kFar); 487 488 // Jump if the unsigned integer value cannot be represented by a smi. 489 void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid, 490 Label::Distance near_jump = Label::kFar); 491 492 // Jump to label if the value is a tagged smi. 493 void JumpIfSmi(Register src, 494 Label* on_smi, 495 Label::Distance near_jump = Label::kFar); 496 497 // Jump to label if the value is not a tagged smi. 498 void JumpIfNotSmi(Register src, 499 Label* on_not_smi, 500 Label::Distance near_jump = Label::kFar); 501 502 // Jump to label if the value is not a non-negative tagged smi. 503 void JumpUnlessNonNegativeSmi(Register src, 504 Label* on_not_smi, 505 Label::Distance near_jump = Label::kFar); 506 507 // Jump to label if the value, which must be a tagged smi, has value equal 508 // to the constant.
509 void JumpIfSmiEqualsConstant(Register src, 510 Smi* constant, 511 Label* on_equals, 512 Label::Distance near_jump = Label::kFar); 513 514 // Jump if either or both register are not smi values. 515 void JumpIfNotBothSmi(Register src1, 516 Register src2, 517 Label* on_not_both_smi, 518 Label::Distance near_jump = Label::kFar); 519 520 // Jump if either or both register are not non-negative smi values. 521 void JumpUnlessBothNonNegativeSmi(Register src1, Register src2, 522 Label* on_not_both_smi, 523 Label::Distance near_jump = Label::kFar); 524 525 // Operations on tagged smi values. 526 527 // Smis represent a subset of integers. The subset is always equivalent to 528 // a two's complement interpretation of a fixed number of bits. 529 530 // Optimistically adds an integer constant to a supposed smi. 531 // If the src is not a smi, or the result is not a smi, jump to 532 // the label. 533 void SmiTryAddConstant(Register dst, 534 Register src, 535 Smi* constant, 536 Label* on_not_smi_result, 537 Label::Distance near_jump = Label::kFar); 538 539 // Add an integer constant to a tagged smi, giving a tagged smi as result. 540 // No overflow testing on the result is done. 541 void SmiAddConstant(Register dst, Register src, Smi* constant); 542 543 // Add an integer constant to a tagged smi, giving a tagged smi as result. 544 // No overflow testing on the result is done. 545 void SmiAddConstant(const Operand& dst, Smi* constant); 546 547 // Add an integer constant to a tagged smi, giving a tagged smi as result, 548 // or jumping to a label if the result cannot be represented by a smi. 549 void SmiAddConstant(Register dst, 550 Register src, 551 Smi* constant, 552 Label* on_not_smi_result, 553 Label::Distance near_jump = Label::kFar); 554 555 // Subtract an integer constant from a tagged smi, giving a tagged smi as 556 // result. No testing on the result is done. Sets the N and Z flags 557 // based on the value of the resulting integer. 
558 void SmiSubConstant(Register dst, Register src, Smi* constant); 559 560 // Subtract an integer constant from a tagged smi, giving a tagged smi as 561 // result, or jumping to a label if the result cannot be represented by a smi. 562 void SmiSubConstant(Register dst, 563 Register src, 564 Smi* constant, 565 Label* on_not_smi_result, 566 Label::Distance near_jump = Label::kFar); 567 568 // Negating a smi can give a negative zero or too large positive value. 569 // NOTICE: This operation jumps on success, not failure! 570 void SmiNeg(Register dst, 571 Register src, 572 Label* on_smi_result, 573 Label::Distance near_jump = Label::kFar); 574 575 // Adds smi values and return the result as a smi. 576 // If dst is src1, then src1 will be destroyed, even if 577 // the operation is unsuccessful. 578 void SmiAdd(Register dst, 579 Register src1, 580 Register src2, 581 Label* on_not_smi_result, 582 Label::Distance near_jump = Label::kFar); 583 void SmiAdd(Register dst, 584 Register src1, 585 const Operand& src2, 586 Label* on_not_smi_result, 587 Label::Distance near_jump = Label::kFar); 588 589 void SmiAdd(Register dst, 590 Register src1, 591 Register src2); 592 593 // Subtracts smi values and return the result as a smi. 594 // If dst is src1, then src1 will be destroyed, even if 595 // the operation is unsuccessful. 596 void SmiSub(Register dst, 597 Register src1, 598 Register src2, 599 Label* on_not_smi_result, 600 Label::Distance near_jump = Label::kFar); 601 602 void SmiSub(Register dst, 603 Register src1, 604 Register src2); 605 606 void SmiSub(Register dst, 607 Register src1, 608 const Operand& src2, 609 Label* on_not_smi_result, 610 Label::Distance near_jump = Label::kFar); 611 612 void SmiSub(Register dst, 613 Register src1, 614 const Operand& src2); 615 616 // Multiplies smi values and return the result as a smi, 617 // if possible. 618 // If dst is src1, then src1 will be destroyed, even if 619 // the operation is unsuccessful. 
620 void SmiMul(Register dst, 621 Register src1, 622 Register src2, 623 Label* on_not_smi_result, 624 Label::Distance near_jump = Label::kFar); 625 626 // Divides one smi by another and returns the quotient. 627 // Clobbers rax and rdx registers. 628 void SmiDiv(Register dst, 629 Register src1, 630 Register src2, 631 Label* on_not_smi_result, 632 Label::Distance near_jump = Label::kFar); 633 634 // Divides one smi by another and returns the remainder. 635 // Clobbers rax and rdx registers. 636 void SmiMod(Register dst, 637 Register src1, 638 Register src2, 639 Label* on_not_smi_result, 640 Label::Distance near_jump = Label::kFar); 641 642 // Bitwise operations. 643 void SmiNot(Register dst, Register src); 644 void SmiAnd(Register dst, Register src1, Register src2); 645 void SmiOr(Register dst, Register src1, Register src2); 646 void SmiXor(Register dst, Register src1, Register src2); 647 void SmiAndConstant(Register dst, Register src1, Smi* constant); 648 void SmiOrConstant(Register dst, Register src1, Smi* constant); 649 void SmiXorConstant(Register dst, Register src1, Smi* constant); 650 651 void SmiShiftLeftConstant(Register dst, 652 Register src, 653 int shift_value); 654 void SmiShiftLogicalRightConstant(Register dst, 655 Register src, 656 int shift_value, 657 Label* on_not_smi_result, 658 Label::Distance near_jump = Label::kFar); 659 void SmiShiftArithmeticRightConstant(Register dst, 660 Register src, 661 int shift_value); 662 663 // Shifts a smi value to the left, and returns the result if that is a smi. 664 // Uses and clobbers rcx, so dst may not be rcx. 665 void SmiShiftLeft(Register dst, 666 Register src1, 667 Register src2); 668 // Shifts a smi value to the right, shifting in zero bits at the top, and 669 // returns the unsigned interpretation of the result if that is a smi. 670 // Uses and clobbers rcx, so dst may not be rcx.
671 void SmiShiftLogicalRight(Register dst, 672 Register src1, 673 Register src2, 674 Label* on_not_smi_result, 675 Label::Distance near_jump = Label::kFar); 676 // Shifts a smi value to the right, sign extending the top, and 677 // returns the signed interpretation of the result. That will always 678 // be a valid smi value, since it's numerically smaller than the 679 // original. 680 // Uses and clobbers rcx, so dst may not be rcx. 681 void SmiShiftArithmeticRight(Register dst, 682 Register src1, 683 Register src2); 684 685 // Specialized operations 686 687 // Select the non-smi register of two registers where exactly one is a 688 // smi. If neither are smis, jump to the failure label. 689 void SelectNonSmi(Register dst, 690 Register src1, 691 Register src2, 692 Label* on_not_smis, 693 Label::Distance near_jump = Label::kFar); 694 695 // Converts, if necessary, a smi to a combination of number and 696 // multiplier to be used as a scaled index. 697 // The src register contains a *positive* smi value. The shift is the 698 // power of two to multiply the index value by (e.g. 699 // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2). 700 // The returned index register may be either src or dst, depending 701 // on what is most efficient. If src and dst are different registers, 702 // src is always unchanged. 703 SmiIndex SmiToIndex(Register dst, Register src, int shift); 704 705 // Converts a positive smi to a negative index. 706 SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift); 707 708 // Add the value of a smi in memory to an int32 register. 709 // Sets flags as a normal add. 710 void AddSmiField(Register dst, const Operand& src); 711 712 // Basic Smi operations.
713 void Move(Register dst, Smi* source) { 714 LoadSmiConstant(dst, source); 715 } 716 717 void Move(const Operand& dst, Smi* source) { 718 Register constant = GetSmiConstant(source); 719 movq(dst, constant); 720 } 721 722 void Push(Smi* smi); 723 void Test(const Operand& dst, Smi* source); 724 725 726 // --------------------------------------------------------------------------- 727 // String macros. 728 729 // If object is a string, its map is loaded into object_map. 730 void JumpIfNotString(Register object, 731 Register object_map, 732 Label* not_string, 733 Label::Distance near_jump = Label::kFar); 734 735 736 void JumpIfNotBothSequentialAsciiStrings( 737 Register first_object, 738 Register second_object, 739 Register scratch1, 740 Register scratch2, 741 Label* on_not_both_flat_ascii, 742 Label::Distance near_jump = Label::kFar); 743 744 // Check whether the instance type represents a flat ASCII string. Jump to the 745 // label if not. If the instance type can be scratched specify same register 746 // for both instance type and scratch. 747 void JumpIfInstanceTypeIsNotSequentialAscii( 748 Register instance_type, 749 Register scratch, 750 Label*on_not_flat_ascii_string, 751 Label::Distance near_jump = Label::kFar); 752 753 void JumpIfBothInstanceTypesAreNotSequentialAscii( 754 Register first_object_instance_type, 755 Register second_object_instance_type, 756 Register scratch1, 757 Register scratch2, 758 Label* on_fail, 759 Label::Distance near_jump = Label::kFar); 760 761 // Checks if the given register or operand is a unique name 762 void JumpIfNotUniqueName(Register reg, Label* not_unique_name, 763 Label::Distance distance = Label::kFar); 764 void JumpIfNotUniqueName(Operand operand, Label* not_unique_name, 765 Label::Distance distance = Label::kFar); 766 767 // --------------------------------------------------------------------------- 768 // Macro instructions. 769 770 // Load a register with a long value as efficiently as possible. 
771 void Set(Register dst, int64_t x); 772 void Set(const Operand& dst, int64_t x); 773 774 // Move if the registers are not identical. 775 void Move(Register target, Register source); 776 777 // Support for constant splitting. 778 bool IsUnsafeInt(const int x); 779 void SafeMove(Register dst, Smi* src); 780 void SafePush(Smi* src); 781 782 // Bit-field support. 783 void TestBit(const Operand& dst, int bit_index); 784 785 // Handle support 786 void Move(Register dst, Handle<Object> source); 787 void Move(const Operand& dst, Handle<Object> source); 788 void Cmp(Register dst, Handle<Object> source); 789 void Cmp(const Operand& dst, Handle<Object> source); 790 void Cmp(Register dst, Smi* src); 791 void Cmp(const Operand& dst, Smi* src); 792 void Push(Handle<Object> source); 793 794 // Load a heap object and handle the case of new-space objects by 795 // indirecting via a global cell. 796 void LoadHeapObject(Register result, Handle<HeapObject> object); 797 void CmpHeapObject(Register reg, Handle<HeapObject> object); 798 void PushHeapObject(Handle<HeapObject> object); 799 800 void LoadObject(Register result, Handle<Object> object) { 801 AllowDeferredHandleDereference heap_object_check; 802 if (object->IsHeapObject()) { 803 LoadHeapObject(result, Handle<HeapObject>::cast(object)); 804 } else { 805 Move(result, object); 806 } 807 } 808 809 void CmpObject(Register reg, Handle<Object> object) { 810 AllowDeferredHandleDereference heap_object_check; 811 if (object->IsHeapObject()) { 812 CmpHeapObject(reg, Handle<HeapObject>::cast(object)); 813 } else { 814 Cmp(reg, object); 815 } 816 } 817 818 // Load a global cell into a register. 819 void LoadGlobalCell(Register dst, Handle<Cell> cell); 820 821 // Emit code to discard a non-negative number of pointer-sized elements 822 // from the stack, clobbering only the rsp register. 
823 void Drop(int stack_elements); 824 825 void Call(Label* target) { call(target); } 826 void Push(Register src) { push(src); } 827 void Pop(Register dst) { pop(dst); } 828 void PushReturnAddressFrom(Register src) { push(src); } 829 void PopReturnAddressTo(Register dst) { pop(dst); } 830 831 // Control Flow 832 void Jump(Address destination, RelocInfo::Mode rmode); 833 void Jump(ExternalReference ext); 834 void Jump(Handle<Code> code_object, RelocInfo::Mode rmode); 835 836 void Call(Address destination, RelocInfo::Mode rmode); 837 void Call(ExternalReference ext); 838 void Call(Handle<Code> code_object, 839 RelocInfo::Mode rmode, 840 TypeFeedbackId ast_id = TypeFeedbackId::None()); 841 842 // The size of the code generated for different call instructions. 843 int CallSize(Address destination, RelocInfo::Mode rmode) { 844 return kCallSequenceLength; 845 } 846 int CallSize(ExternalReference ext); 847 int CallSize(Handle<Code> code_object) { 848 // Code calls use 32-bit relative addressing. 849 return kShortCallInstructionLength; 850 } 851 int CallSize(Register target) { 852 // Opcode: REX_opt FF /2 m64 853 return (target.high_bit() != 0) ? 3 : 2; 854 } 855 int CallSize(const Operand& target) { 856 // Opcode: REX_opt FF /2 m64 857 return (target.requires_rex() ? 2 : 1) + target.operand_size(); 858 } 859 860 // Emit call to the code we are currently generating. 861 void CallSelf() { 862 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location())); 863 Call(self, RelocInfo::CODE_TARGET); 864 } 865 866 // Non-x64 instructions. 867 // Push/pop all general purpose registers. 868 // Does not push rsp/rbp nor any of the assembler's special purpose registers 869 // (kScratchRegister, kSmiConstantRegister, kRootRegister). 870 void Pushad(); 871 void Popad(); 872 // Sets the stack as after performing Popad, without actually loading the 873 // registers. 874 void Dropad(); 875 876 // Compare object type for heap object. 
877 // Always use unsigned comparisons: above and below, not less and greater. 878 // Incoming register is heap_object and outgoing register is map. 879 // They may be the same register, and may be kScratchRegister. 880 void CmpObjectType(Register heap_object, InstanceType type, Register map); 881 882 // Compare instance type for map. 883 // Always use unsigned comparisons: above and below, not less and greater. 884 void CmpInstanceType(Register map, InstanceType type); 885 886 // Check if a map for a JSObject indicates that the object has fast elements. 887 // Jump to the specified label if it does not. 888 void CheckFastElements(Register map, 889 Label* fail, 890 Label::Distance distance = Label::kFar); 891 892 // Check if a map for a JSObject indicates that the object can have both smi 893 // and HeapObject elements. Jump to the specified label if it does not. 894 void CheckFastObjectElements(Register map, 895 Label* fail, 896 Label::Distance distance = Label::kFar); 897 898 // Check if a map for a JSObject indicates that the object has fast smi only 899 // elements. Jump to the specified label if it does not. 900 void CheckFastSmiElements(Register map, 901 Label* fail, 902 Label::Distance distance = Label::kFar); 903 904 // Check to see if maybe_number can be stored as a double in 905 // FastDoubleElements. If it can, store it at the index specified by index in 906 // the FastDoubleElements array elements, otherwise jump to fail. Note that 907 // index must not be smi-tagged. 908 void StoreNumberToDoubleElements(Register maybe_number, 909 Register elements, 910 Register index, 911 XMMRegister xmm_scratch, 912 Label* fail, 913 int elements_offset = 0); 914 915 // Compare an object's map with the specified map and its transitioned 916 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with 917 // result of map compare. If multiple map compares are required, the compare 918 // sequences branches to early_success. 
919 void CompareMap(Register obj, 920 Handle<Map> map, 921 Label* early_success); 922 923 // Check if the map of an object is equal to a specified map and branch to 924 // label if not. Skip the smi check if not required (object is known to be a 925 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match 926 // against maps that are ElementsKind transition maps of the specified map. 927 void CheckMap(Register obj, 928 Handle<Map> map, 929 Label* fail, 930 SmiCheckType smi_check_type); 931 932 // Check if the map of an object is equal to a specified map and branch to a 933 // specified target if equal. Skip the smi check if not required (object is 934 // known to be a heap object) 935 void DispatchMap(Register obj, 936 Register unused, 937 Handle<Map> map, 938 Handle<Code> success, 939 SmiCheckType smi_check_type); 940 941 // Check if the object in register heap_object is a string. Afterwards the 942 // register map contains the object map and the register instance_type 943 // contains the instance_type. The registers map and instance_type can be the 944 // same in which case it contains the instance type afterwards. Either of the 945 // registers map and instance_type can be the same as heap_object. 946 Condition IsObjectStringType(Register heap_object, 947 Register map, 948 Register instance_type); 949 950 // Check if the object in register heap_object is a name. Afterwards the 951 // register map contains the object map and the register instance_type 952 // contains the instance_type. The registers map and instance_type can be the 953 // same in which case it contains the instance type afterwards. Either of the 954 // registers map and instance_type can be the same as heap_object. 955 Condition IsObjectNameType(Register heap_object, 956 Register map, 957 Register instance_type); 958 959 // FCmp compares and pops the two values on top of the FPU stack. 
960 // The flag results are similar to integer cmp, but requires unsigned 961 // jcc instructions (je, ja, jae, jb, jbe, je, and jz). 962 void FCmp(); 963 964 void ClampUint8(Register reg); 965 966 void ClampDoubleToUint8(XMMRegister input_reg, 967 XMMRegister temp_xmm_reg, 968 Register result_reg); 969 970 void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch); 971 972 void LoadInstanceDescriptors(Register map, Register descriptors); 973 void EnumLength(Register dst, Register map); 974 void NumberOfOwnDescriptors(Register dst, Register map); 975 976 template<typename Field> 977 void DecodeField(Register reg) { 978 static const int shift = Field::kShift + kSmiShift; 979 static const int mask = Field::kMask >> Field::kShift; 980 shr(reg, Immediate(shift)); 981 and_(reg, Immediate(mask)); 982 shl(reg, Immediate(kSmiShift)); 983 } 984 985 // Abort execution if argument is not a number, enabled via --debug-code. 986 void AssertNumber(Register object); 987 988 // Abort execution if argument is a smi, enabled via --debug-code. 989 void AssertNotSmi(Register object); 990 991 // Abort execution if argument is not a smi, enabled via --debug-code. 992 void AssertSmi(Register object); 993 void AssertSmi(const Operand& object); 994 995 // Abort execution if a 64 bit register containing a 32 bit payload does not 996 // have zeros in the top 32 bits, enabled via --debug-code. 997 void AssertZeroExtended(Register reg); 998 999 // Abort execution if argument is not a string, enabled via --debug-code. 1000 void AssertString(Register object); 1001 1002 // Abort execution if argument is not a name, enabled via --debug-code. 1003 void AssertName(Register object); 1004 1005 // Abort execution if argument is not the root value with the given index, 1006 // enabled via --debug-code. 
1007 void AssertRootValue(Register src, 1008 Heap::RootListIndex root_value_index, 1009 BailoutReason reason); 1010 1011 // --------------------------------------------------------------------------- 1012 // Exception handling 1013 1014 // Push a new try handler and link it into try handler chain. 1015 void PushTryHandler(StackHandler::Kind kind, int handler_index); 1016 1017 // Unlink the stack handler on top of the stack from the try handler chain. 1018 void PopTryHandler(); 1019 1020 // Activate the top handler in the try hander chain and pass the 1021 // thrown value. 1022 void Throw(Register value); 1023 1024 // Propagate an uncatchable exception out of the current JS stack. 1025 void ThrowUncatchable(Register value); 1026 1027 // --------------------------------------------------------------------------- 1028 // Inline caching support 1029 1030 // Generate code for checking access rights - used for security checks 1031 // on access to global objects across environments. The holder register 1032 // is left untouched, but the scratch register and kScratchRegister, 1033 // which must be different, are clobbered. 1034 void CheckAccessGlobalProxy(Register holder_reg, 1035 Register scratch, 1036 Label* miss); 1037 1038 void GetNumberHash(Register r0, Register scratch); 1039 1040 void LoadFromNumberDictionary(Label* miss, 1041 Register elements, 1042 Register key, 1043 Register r0, 1044 Register r1, 1045 Register r2, 1046 Register result); 1047 1048 1049 // --------------------------------------------------------------------------- 1050 // Allocation support 1051 1052 // Allocate an object in new space or old pointer space. If the given space 1053 // is exhausted control continues at the gc_required label. The allocated 1054 // object is returned in result and end of the new object is returned in 1055 // result_end. The register scratch can be passed as no_reg in which case 1056 // an additional object reference will be added to the reloc info. 
The 1057 // returned pointers in result and result_end have not yet been tagged as 1058 // heap objects. If result_contains_top_on_entry is true the content of 1059 // result is known to be the allocation top on entry (could be result_end 1060 // from a previous call). If result_contains_top_on_entry is true scratch 1061 // should be no_reg as it is never used. 1062 void Allocate(int object_size, 1063 Register result, 1064 Register result_end, 1065 Register scratch, 1066 Label* gc_required, 1067 AllocationFlags flags); 1068 1069 void Allocate(int header_size, 1070 ScaleFactor element_size, 1071 Register element_count, 1072 Register result, 1073 Register result_end, 1074 Register scratch, 1075 Label* gc_required, 1076 AllocationFlags flags); 1077 1078 void Allocate(Register object_size, 1079 Register result, 1080 Register result_end, 1081 Register scratch, 1082 Label* gc_required, 1083 AllocationFlags flags); 1084 1085 // Undo allocation in new space. The object passed and objects allocated after 1086 // it will no longer be allocated. Make sure that no pointers are left to the 1087 // object(s) no longer allocated as they would be invalid when allocation is 1088 // un-done. 1089 void UndoAllocationInNewSpace(Register object); 1090 1091 // Allocate a heap number in new space with undefined value. Returns 1092 // tagged pointer in result register, or jumps to gc_required if new 1093 // space is full. 1094 void AllocateHeapNumber(Register result, 1095 Register scratch, 1096 Label* gc_required); 1097 1098 // Allocate a sequential string. All the header fields of the string object 1099 // are initialized. 
1100 void AllocateTwoByteString(Register result, 1101 Register length, 1102 Register scratch1, 1103 Register scratch2, 1104 Register scratch3, 1105 Label* gc_required); 1106 void AllocateAsciiString(Register result, 1107 Register length, 1108 Register scratch1, 1109 Register scratch2, 1110 Register scratch3, 1111 Label* gc_required); 1112 1113 // Allocate a raw cons string object. Only the map field of the result is 1114 // initialized. 1115 void AllocateTwoByteConsString(Register result, 1116 Register scratch1, 1117 Register scratch2, 1118 Label* gc_required); 1119 void AllocateAsciiConsString(Register result, 1120 Register scratch1, 1121 Register scratch2, 1122 Label* gc_required); 1123 1124 // Allocate a raw sliced string object. Only the map field of the result is 1125 // initialized. 1126 void AllocateTwoByteSlicedString(Register result, 1127 Register scratch1, 1128 Register scratch2, 1129 Label* gc_required); 1130 void AllocateAsciiSlicedString(Register result, 1131 Register scratch1, 1132 Register scratch2, 1133 Label* gc_required); 1134 1135 // --------------------------------------------------------------------------- 1136 // Support functions. 1137 1138 // Check if result is zero and op is negative. 1139 void NegativeZeroTest(Register result, Register op, Label* then_label); 1140 1141 // Check if result is zero and op is negative in code using jump targets. 1142 void NegativeZeroTest(CodeGenerator* cgen, 1143 Register result, 1144 Register op, 1145 JumpTarget* then_target); 1146 1147 // Check if result is zero and any of op1 and op2 are negative. 1148 // Register scratch is destroyed, and it must be different from op2. 1149 void NegativeZeroTest(Register result, Register op1, Register op2, 1150 Register scratch, Label* then_label); 1151 1152 // Try to get function prototype of a function and puts the value in 1153 // the result register. Checks that the function really is a 1154 // function and jumps to the miss label if the fast checks fail. 
The 1155 // function register will be untouched; the other register may be 1156 // clobbered. 1157 void TryGetFunctionPrototype(Register function, 1158 Register result, 1159 Label* miss, 1160 bool miss_on_bound_function = false); 1161 1162 // Generates code for reporting that an illegal operation has 1163 // occurred. 1164 void IllegalOperation(int num_arguments); 1165 1166 // Picks out an array index from the hash field. 1167 // Register use: 1168 // hash - holds the index's hash. Clobbered. 1169 // index - holds the overwritten index on exit. 1170 void IndexFromHash(Register hash, Register index); 1171 1172 // Find the function context up the context chain. 1173 void LoadContext(Register dst, int context_chain_length); 1174 1175 // Conditionally load the cached Array transitioned map of type 1176 // transitioned_kind from the native context if the map in register 1177 // map_in_out is the cached Array map in the native context of 1178 // expected_kind. 1179 void LoadTransitionedArrayMapConditional( 1180 ElementsKind expected_kind, 1181 ElementsKind transitioned_kind, 1182 Register map_in_out, 1183 Register scratch, 1184 Label* no_map_match); 1185 1186 // Load the initial map for new Arrays from a JSFunction. 1187 void LoadInitialArrayMap(Register function_in, 1188 Register scratch, 1189 Register map_out, 1190 bool can_have_holes); 1191 1192 // Load the global function with the given index. 1193 void LoadGlobalFunction(int index, Register function); 1194 void LoadArrayFunction(Register function); 1195 1196 // Load the initial map from the global function. The registers 1197 // function and map can be the same. 1198 void LoadGlobalFunctionInitialMap(Register function, Register map); 1199 1200 // --------------------------------------------------------------------------- 1201 // Runtime calls 1202 1203 // Call a code stub. 1204 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None()); 1205 1206 // Tail call a code stub (jump). 
1207 void TailCallStub(CodeStub* stub); 1208 1209 // Return from a code stub after popping its arguments. 1210 void StubReturn(int argc); 1211 1212 // Call a runtime routine. 1213 void CallRuntime(const Runtime::Function* f, int num_arguments); 1214 1215 // Call a runtime function and save the value of XMM registers. 1216 void CallRuntimeSaveDoubles(Runtime::FunctionId id); 1217 1218 // Convenience function: Same as above, but takes the fid instead. 1219 void CallRuntime(Runtime::FunctionId id, int num_arguments); 1220 1221 // Convenience function: call an external reference. 1222 void CallExternalReference(const ExternalReference& ext, 1223 int num_arguments); 1224 1225 // Tail call of a runtime routine (jump). 1226 // Like JumpToExternalReference, but also takes care of passing the number 1227 // of parameters. 1228 void TailCallExternalReference(const ExternalReference& ext, 1229 int num_arguments, 1230 int result_size); 1231 1232 // Convenience function: tail call a runtime routine (jump). 1233 void TailCallRuntime(Runtime::FunctionId fid, 1234 int num_arguments, 1235 int result_size); 1236 1237 // Jump to a runtime routine. 1238 void JumpToExternalReference(const ExternalReference& ext, int result_size); 1239 1240 // Prepares stack to put arguments (aligns and so on). WIN64 calling 1241 // convention requires to put the pointer to the return value slot into 1242 // rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves 1243 // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize 1244 // inside the exit frame (not GCed) accessible via StackSpaceOperand. 1245 void PrepareCallApiFunction(int arg_stack_space, bool returns_handle); 1246 1247 // Calls an API function. Allocates HandleScope, extracts returned value 1248 // from handle and propagates exceptions. Clobbers r14, r15, rbx and 1249 // caller-save registers. Restores context. On return removes 1250 // stack_space * kPointerSize (GCed). 
1251 void CallApiFunctionAndReturn(Address function_address, 1252 Address thunk_address, 1253 Register thunk_last_arg, 1254 int stack_space, 1255 bool returns_handle, 1256 int return_value_offset_from_rbp); 1257 1258 // Before calling a C-function from generated code, align arguments on stack. 1259 // After aligning the frame, arguments must be stored in rsp[0], rsp[8], 1260 // etc., not pushed. The argument count assumes all arguments are word sized. 1261 // The number of slots reserved for arguments depends on platform. On Windows 1262 // stack slots are reserved for the arguments passed in registers. On other 1263 // platforms stack slots are only reserved for the arguments actually passed 1264 // on the stack. 1265 void PrepareCallCFunction(int num_arguments); 1266 1267 // Calls a C function and cleans up the space for arguments allocated 1268 // by PrepareCallCFunction. The called function is not allowed to trigger a 1269 // garbage collection, since that might move the code and invalidate the 1270 // return address (unless this is somehow accounted for by the called 1271 // function). 1272 void CallCFunction(ExternalReference function, int num_arguments); 1273 void CallCFunction(Register function, int num_arguments); 1274 1275 // Calculate the number of stack slots to reserve for arguments when calling a 1276 // C function. 1277 int ArgumentStackSlotsForCFunctionCall(int num_arguments); 1278 1279 // --------------------------------------------------------------------------- 1280 // Utilities 1281 1282 void Ret(); 1283 1284 // Return and drop arguments from stack, where the number of arguments 1285 // may be bigger than 2^16 - 1. Requires a scratch register. 1286 void Ret(int bytes_dropped, Register scratch); 1287 1288 Handle<Object> CodeObject() { 1289 ASSERT(!code_object_.is_null()); 1290 return code_object_; 1291 } 1292 1293 // Copy length bytes from source to destination. 
1294 // Uses scratch register internally (if you have a low-eight register 1295 // free, do use it, otherwise kScratchRegister will be used). 1296 // The min_length is a minimum limit on the value that length will have. 1297 // The algorithm has some special cases that might be omitted if the string 1298 // is known to always be long. 1299 void CopyBytes(Register destination, 1300 Register source, 1301 Register length, 1302 int min_length = 0, 1303 Register scratch = kScratchRegister); 1304 1305 // Initialize fields with filler values. Fields starting at |start_offset| 1306 // not including end_offset are overwritten with the value in |filler|. At 1307 // the end the loop, |start_offset| takes the value of |end_offset|. 1308 void InitializeFieldsWithFiller(Register start_offset, 1309 Register end_offset, 1310 Register filler); 1311 1312 1313 // --------------------------------------------------------------------------- 1314 // StatsCounter support 1315 1316 void SetCounter(StatsCounter* counter, int value); 1317 void IncrementCounter(StatsCounter* counter, int value); 1318 void DecrementCounter(StatsCounter* counter, int value); 1319 1320 1321 // --------------------------------------------------------------------------- 1322 // Debugging 1323 1324 // Calls Abort(msg) if the condition cc is not satisfied. 1325 // Use --debug_code to enable. 1326 void Assert(Condition cc, BailoutReason reason); 1327 1328 void AssertFastElements(Register elements); 1329 1330 // Like Assert(), but always enabled. 1331 void Check(Condition cc, BailoutReason reason); 1332 1333 // Print a message to stdout and abort execution. 1334 void Abort(BailoutReason msg); 1335 1336 // Check that the stack is aligned. 1337 void CheckStackAlignment(); 1338 1339 // Verify restrictions about code generated in stubs. 
1340 void set_generating_stub(bool value) { generating_stub_ = value; } 1341 bool generating_stub() { return generating_stub_; } 1342 void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } 1343 bool allow_stub_calls() { return allow_stub_calls_; } 1344 void set_has_frame(bool value) { has_frame_ = value; } 1345 bool has_frame() { return has_frame_; } 1346 inline bool AllowThisStubCall(CodeStub* stub); 1347 1348 static int SafepointRegisterStackIndex(Register reg) { 1349 return SafepointRegisterStackIndex(reg.code()); 1350 } 1351 1352 // Activation support. 1353 void EnterFrame(StackFrame::Type type); 1354 void LeaveFrame(StackFrame::Type type); 1355 1356 // Expects object in rax and returns map with validated enum cache 1357 // in rax. Assumes that any other register can be used as a scratch. 1358 void CheckEnumCache(Register null_value, 1359 Label* call_runtime); 1360 1361 // AllocationMemento support. Arrays may have an associated 1362 // AllocationMemento object that can be checked for in order to pretransition 1363 // to another type. 1364 // On entry, receiver_reg should point to the array object. 1365 // scratch_reg gets clobbered. 1366 // If allocation info is present, condition flags are set to equal 1367 void TestJSArrayForAllocationMemento(Register receiver_reg, 1368 Register scratch_reg); 1369 1370 private: 1371 // Order general registers are pushed by Pushad. 1372 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. 1373 static const int kSafepointPushRegisterIndices[Register::kNumRegisters]; 1374 static const int kNumSafepointSavedRegisters = 11; 1375 static const int kSmiShift = kSmiTagSize + kSmiShiftSize; 1376 1377 bool generating_stub_; 1378 bool allow_stub_calls_; 1379 bool has_frame_; 1380 bool root_array_available_; 1381 1382 // Returns a register holding the smi value. The register MUST NOT be 1383 // modified. It may be the "smi 1 constant" register. 
1384 Register GetSmiConstant(Smi* value); 1385 1386 intptr_t RootRegisterDelta(ExternalReference other); 1387 1388 // Moves the smi value to the destination register. 1389 void LoadSmiConstant(Register dst, Smi* value); 1390 1391 // This handle will be patched with the code object on installation. 1392 Handle<Object> code_object_; 1393 1394 // Helper functions for generating invokes. 1395 void InvokePrologue(const ParameterCount& expected, 1396 const ParameterCount& actual, 1397 Handle<Code> code_constant, 1398 Register code_register, 1399 Label* done, 1400 bool* definitely_mismatches, 1401 InvokeFlag flag, 1402 Label::Distance near_jump = Label::kFar, 1403 const CallWrapper& call_wrapper = NullCallWrapper(), 1404 CallKind call_kind = CALL_AS_METHOD); 1405 1406 void EnterExitFramePrologue(bool save_rax); 1407 1408 // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack 1409 // accessible via StackSpaceOperand. 1410 void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles); 1411 1412 void LeaveExitFrameEpilogue(); 1413 1414 // Allocation support helpers. 1415 // Loads the top of new-space into the result register. 1416 // Otherwise the address of the new-space top is loaded into scratch (if 1417 // scratch is valid), and the new-space top is loaded into result. 1418 void LoadAllocationTopHelper(Register result, 1419 Register scratch, 1420 AllocationFlags flags); 1421 1422 // Update allocation top with value in result_end register. 1423 // If scratch is valid, it contains the address of the allocation top. 1424 void UpdateAllocationTopHelper(Register result_end, 1425 Register scratch, 1426 AllocationFlags flags); 1427 1428 // Helper for PopHandleScope. Allowed to perform a GC and returns 1429 // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and 1430 // possibly returns a failure object indicating an allocation failure. 
1431 Object* PopHandleScopeHelper(Register saved, 1432 Register scratch, 1433 bool gc_allowed); 1434 1435 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. 1436 void InNewSpace(Register object, 1437 Register scratch, 1438 Condition cc, 1439 Label* branch, 1440 Label::Distance distance = Label::kFar); 1441 1442 // Helper for finding the mark bits for an address. Afterwards, the 1443 // bitmap register points at the word with the mark bits and the mask 1444 // the position of the first bit. Uses rcx as scratch and leaves addr_reg 1445 // unchanged. 1446 inline void GetMarkBits(Register addr_reg, 1447 Register bitmap_reg, 1448 Register mask_reg); 1449 1450 // Helper for throwing exceptions. Compute a handler address and jump to 1451 // it. See the implementation for register usage. 1452 void JumpToHandlerEntry(); 1453 1454 // Compute memory operands for safepoint stack slots. 1455 Operand SafepointRegisterSlot(Register reg); 1456 static int SafepointRegisterStackIndex(int reg_code) { 1457 return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1; 1458 } 1459 1460 // Needs access to SafepointRegisterStackIndex for compiled frame 1461 // traversal. 1462 friend class StandardFrame; 1463 }; 1464 1465 1466 // The code patcher is used to patch (typically) small parts of code e.g. for 1467 // debugging and other types of instrumentation. When using the code patcher 1468 // the exact number of bytes specified must be emitted. Is not legal to emit 1469 // relocation information. If any of these constraints are violated it causes 1470 // an assertion. 1471 class CodePatcher { 1472 public: 1473 CodePatcher(byte* address, int size); 1474 virtual ~CodePatcher(); 1475 1476 // Macro assembler to emit code. 1477 MacroAssembler* masm() { return &masm_; } 1478 1479 private: 1480 byte* address_; // The address of the code being patched. 1481 int size_; // Number of bytes of the expected patch size. 
1482 MacroAssembler masm_; // Macro assembler used to generate the code. 1483 }; 1484 1485 1486 // ----------------------------------------------------------------------------- 1487 // Static helper functions. 1488 1489 // Generate an Operand for loading a field from an object. 1490 inline Operand FieldOperand(Register object, int offset) { 1491 return Operand(object, offset - kHeapObjectTag); 1492 } 1493 1494 1495 // Generate an Operand for loading an indexed field from an object. 1496 inline Operand FieldOperand(Register object, 1497 Register index, 1498 ScaleFactor scale, 1499 int offset) { 1500 return Operand(object, index, scale, offset - kHeapObjectTag); 1501 } 1502 1503 1504 inline Operand ContextOperand(Register context, int index) { 1505 return Operand(context, Context::SlotOffset(index)); 1506 } 1507 1508 1509 inline Operand GlobalObjectOperand() { 1510 return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX); 1511 } 1512 1513 1514 // Provides access to exit frame stack space (not GCed). 
1515 inline Operand StackSpaceOperand(int index) { 1516 #ifdef _WIN64 1517 const int kShaddowSpace = 4; 1518 return Operand(rsp, (index + kShaddowSpace) * kPointerSize); 1519 #else 1520 return Operand(rsp, index * kPointerSize); 1521 #endif 1522 } 1523 1524 1525 inline Operand StackOperandForReturnAddress(int32_t disp) { 1526 return Operand(rsp, disp); 1527 } 1528 1529 1530 #ifdef GENERATED_CODE_COVERAGE 1531 extern void LogGeneratedCodeCoverage(const char* file_line); 1532 #define CODE_COVERAGE_STRINGIFY(x) #x 1533 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 1534 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 1535 #define ACCESS_MASM(masm) { \ 1536 Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \ 1537 masm->pushfq(); \ 1538 masm->Pushad(); \ 1539 masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \ 1540 masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \ 1541 masm->pop(rax); \ 1542 masm->Popad(); \ 1543 masm->popfq(); \ 1544 } \ 1545 masm-> 1546 #else 1547 #define ACCESS_MASM(masm) masm-> 1548 #endif 1549 1550 } } // namespace v8::internal 1551 1552 #endif // V8_X64_MACRO_ASSEMBLER_X64_H_ 1553