// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips64/assembler-mips64.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_v0};
const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};

// Forward declaration.
class JumpTarget;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// MIPS generated code calls C code, it must be via the t9 register.


// Flags used for the LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};

// Flags used for AllocateHeapNumber.
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};

// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};

// Allow the programmer to use the branch delay slot of branches, jumps, and
// calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize li to use a single instruction, rather than a lui/ori/dsll
  // sequence.
  OPTIMIZE_SIZE = 0,
  // Always use 6 instructions (a lui/ori/dsll sequence), even if the constant
  // could be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1,
  // For address loads only 4 instructions are required. Used to mark a
  // constant load that will be used as an address without relocation
  // information. It ensures predictable code size, so that specific sites
  // in the code are patchable.
  ADDRESS_LOAD = 2
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg, Register reg9 = no_reg,
                Register reg10 = no_reg);


// -----------------------------------------------------------------------------
// Static helper functions.

#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif


inline MemOperand ContextMemOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}


// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
  // Assumes that Smis are shifted by 32 bits.
  STATIC_ASSERT(kSmiShift == 32);
  return MemOperand(rm, SmiWordOffset(offset));
}


inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
  return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
}


// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
//              n64, and used in RegExp code, and other places
//              with more than 8 arguments.
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK(index > kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
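
// Example (illustrative) use of the helpers above, where '__' is the usual
// 'masm->' shorthand from a code-generation .cc file; the registers chosen
// here are placeholders:
//   __ ld(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
//   __ sw(a4, CFunctionArgumentOperand(5));  // 5th C argument slot (O32).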


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) { \
    Name(target, bd); \
  } \
  void Name(target_type target, \
            COND_TYPED_ARGS, \
            BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, \
                   target_type target, \
                   COND_TYPED_ARGS) { \
    Name(target, COND_ARGS, bd); \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name) \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int32_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS


  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
                  const Operand& rt = Operand(zero_reg), \
                  BranchDelaySlot bd = PROTECT

  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  static int CallSize(Register target, COND_ARGS);
  void Call(Register target, COND_ARGS);
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            COND_ARGS);
  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
                  Register rs = zero_reg,
                  const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }

  bool IsNear(Label* L, Condition cond, int rs_reg);

  void Branch(Label* L,
              Condition cond,
              Register rs,
              Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);

  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
  // functor/function with a 'Label* func(size_t index)' signature.
  template <typename Func>
  void GenerateSwitchTable(Register index, size_t case_count,
                           Func GetLabelFunction);
#undef COND_ARGS

  // Emit code that loads the |parameter_index|'th parameter from the stack
  // into the register, according to the CallInterfaceDescriptor definition.
  // |sp_to_ra_offset_in_words| specifies the number of words pushed
  // below the caller's sp.
  template <class Descriptor>
  void LoadParameterFromStack(
      Register reg, typename Descriptor::ParameterIndices parameter_index,
      int sp_to_ra_offset_in_words = 0) {
    DCHECK(Descriptor::kPassLastArgsOnStack);
    UNIMPLEMENTED();
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);
  // Swap two registers. If the scratch register is omitted, a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void Call(Label* target);

  inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
  inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }

  inline void Move(Register dst, Register src) {
    if (!dst.is(src)) {
      mov(dst, src);
    }
  }

  inline void Move_d(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_d(dst, src);
    }
  }

  inline void Move_s(FPURegister dst, FPURegister src) {
    if (!dst.is(src)) {
      mov_s(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    mfhc1(dst_high, src);
  }

  inline void Move(Register dst, FPURegister src) { dmfc1(dst, src); }

  inline void Move(FPURegister dst, Register src) { dmtc1(src, dst); }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    mfhc1(dst_high, src);
  }

  inline void FmoveHigh(FPURegister dst, Register src_high) {
    mthc1(src_high, dst);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  void FmoveLow(FPURegister dst, Register src_low);

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    mthc1(src_high, dst);
  }

  void Move(FPURegister dst, float imm);
  void Move(FPURegister dst, double imm);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  // Min, Max macros.
  // On pre-r6 architectures these functions may modify the at and t8
  // registers.
  void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
                     Label* nan = nullptr);
  void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
                     Label* nan = nullptr);
  void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
                     Label* nan = nullptr);
  void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
                     Label* nan = nullptr);

  void Clz(Register rd, Register rs);

  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Use this rather than b(Label) for code generation.
  void jmp(Label* L) {
    Branch(L);
  }

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(at, index);
    Push(at);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    LoadRoot(at, index);
    Branch(if_equal, eq, with, Operand(at));
  }
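
  // Example (illustrative; the label name and register are placeholders):
  //   Label return_undefined;
  //   JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &return_undefined);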
  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    LoadRoot(at, index);
    Branch(if_not_equal, ne, with, Operand(at));
  }

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };


  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be the object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be the object itself, but scratch will be
  // clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. The value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset
  // from the tagged HeapObject pointer. For use with
  // FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
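
  // Example (illustrative sketch) of RecordWriteField: emit the write
  // barrier after storing a1 into an in-object field of a0; t0 is a scratch
  // register, and ra is assumed to have been saved by the surrounding
  // prologue.
  //   sd(a1, FieldMemOperand(a0, JSObject::kHeaderSize));
  //   RecordWriteField(a0, JSObject::kHeaderSize, a1, t0, kRAHasBeenSaved,
  //                    kDontSaveFPRegs);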
  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a code entry into a
  // JSFunction. Only scratch is clobbered by the operation.
  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
                                 Register scratch);

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      RAStatus ra_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);


  // ---------------------------------------------------------------------------
  // Inline caching support.

  void GetNumberHash(Register reg0, Register scratch);

  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker,
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations
  // in the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n;
    // else return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }


  // ---------------------------------------------------------------------------
  // Allocation support.

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are also clobbered when control continues at the
  // gc_required label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  // FastAllocate is right now only used for folded allocations. It just
  // increments the top pointer without checking against the limit. This can
  // only be done if it was proved earlier that the allocation will succeed.
  void FastAllocate(int object_size, Register result, Register scratch1,
                    Register scratch2, AllocationFlags flags);

  void FastAllocate(Register object_size, Register result, Register result_new,
                    Register scratch, AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are also clobbered
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          MutableMode mode = IMMUTABLE);

  void AllocateHeapNumberWithValue(Register result,
                                   FPURegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);
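
  // Example (illustrative sketch): allocate a two-word object in new space,
  // leaving the tagged result in v0. 'gc_required' is assumed to be bound to
  // a fallback path that allocates through the runtime, and
  // NO_ALLOCATION_FLAGS is the default AllocationFlags value.
  //   Allocate(2 * kPointerSize, v0, t0, t1, &gc_required,
  //            NO_ALLOCATION_FLAGS);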

  // ---------------------------------------------------------------------------
  // Instruction macros.

#define DEFINE_INSTRUCTION(instr) \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) { \
    instr(rd, rs, Operand(rt)); \
  } \
  void instr(Register rs, Register rt, int32_t j) { \
    instr(rs, rt, Operand(j)); \
  }

#define DEFINE_INSTRUCTION2(instr) \
  void instr(Register rs, const Operand& rt); \
  void instr(Register rs, Register rt) { \
    instr(rs, Operand(rt)); \
  } \
  void instr(Register rs, int32_t j) { \
    instr(rs, Operand(j)); \
  }

  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Daddu);
  DEFINE_INSTRUCTION(Div);
  DEFINE_INSTRUCTION(Divu);
  DEFINE_INSTRUCTION(Ddivu);
  DEFINE_INSTRUCTION(Mod);
  DEFINE_INSTRUCTION(Modu);
  DEFINE_INSTRUCTION(Ddiv);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Dsubu);
  DEFINE_INSTRUCTION(Dmod);
  DEFINE_INSTRUCTION(Dmodu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION(Mulh);
  DEFINE_INSTRUCTION(Mulhu);
  DEFINE_INSTRUCTION(Dmul);
  DEFINE_INSTRUCTION(Dmulh);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION2(Dmult);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Dmultu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Ddiv);
  DEFINE_INSTRUCTION2(Divu);
  DEFINE_INSTRUCTION2(Ddivu);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macros.
  DEFINE_INSTRUCTION(Ror);
  DEFINE_INSTRUCTION(Dror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2

  // Load Scaled Address instructions. Parameter sa (shift argument) must be
  // in the range [1, 31]. On pre-r6 architectures the scratch register
  // may be clobbered.
  void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
           Register scratch = at);
  void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
            Register scratch = at);

  void Pref(int32_t hint, const MemOperand& rs);


  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  // Change endianness.
  void ByteSwapSigned(Register dest, Register src, int operand_size);
  void ByteSwapUnsigned(Register dest, Register src, int operand_size);

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  void Ulh(Register rd, const MemOperand& rs);
  void Ulhu(Register rd, const MemOperand& rs);
  void Ush(Register rd, const MemOperand& rs, Register scratch);

  void Ulw(Register rd, const MemOperand& rs);
  void Ulwu(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  void Uld(Register rd, const MemOperand& rs);
  void Usd(Register rd, const MemOperand& rs);

  void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);

  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);

  void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
  void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
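
  // Example (illustrative): the U* macros above tolerate unaligned
  // addresses, e.g. copying a 64-bit value between byte-aligned locations;
  // the registers and offsets here are placeholders.
  //   Uld(t0, MemOperand(a0, 1));
  //   Usd(t0, MemOperand(a1, 1));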
  // Load an int32 into the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline bool LiLower32BitHelper(Register rd, Operand j);
  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved at higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);

  void MultiPushFPU(RegList regs);
  void MultiPushReversedFPU(RegList regs);

  void push(Register src) {
    Daddu(sp, sp, Operand(-kPointerSize));
    sd(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Dsubu(sp, sp, Operand(2 * kPointerSize));
    sd(src1, MemOperand(sp, 1 * kPointerSize));
    sd(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest
  // address).
  void Push(Register src1, Register src2, Register src3) {
    Dsubu(sp, sp, Operand(3 * kPointerSize));
    sd(src1, MemOperand(sp, 2 * kPointerSize));
    sd(src2, MemOperand(sp, 1 * kPointerSize));
    sd(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Dsubu(sp, sp, Operand(4 * kPointerSize));
    sd(src1, MemOperand(sp, 3 * kPointerSize));
    sd(src2, MemOperand(sp, 2 * kPointerSize));
    sd(src3, MemOperand(sp, 1 * kPointerSize));
    sd(src4, MemOperand(sp, 0 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Dsubu(sp, sp, Operand(5 * kPointerSize));
    sd(src1, MemOperand(sp, 4 * kPointerSize));
    sd(src2, MemOperand(sp, 3 * kPointerSize));
    sd(src3, MemOperand(sp, 2 * kPointerSize));
    sd(src4, MemOperand(sp, 1 * kPointerSize));
    sd(src5, MemOperand(sp, 0 * kPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Dsubu(sp, sp, Operand(kPointerSize));
    sd(src, MemOperand(sp, 0));
  }

  void PushRegisterAsTwoSmis(Register src, Register scratch = at);
  void PopRegisterAsTwoSmis(Register dst, Register scratch = at);

  // Pops multiple values from the stack and loads them into the registers
  // specified in regs. The pop order is the opposite of the order used by
  // MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  void MultiPopFPU(RegList regs);
  void MultiPopReversedFPU(RegList regs);

  void pop(Register dst) {
    ld(dst, MemOperand(sp, 0));
    Daddu(sp, sp, Operand(kPointerSize));
  }
  void Pop(Register dst) { pop(dst); }
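
  // Example (illustrative): a multi-register Push stores the leftmost
  // register at the highest address, so a matching multi-register Pop
  // restores every register to its original value.
  //   Push(a0, a1, a2);  // sp -= 3 * kPointerSize; a0 at the highest slot.
  //   Pop(a0, a1, a2);   // Restores all three; sp += 3 * kPointerSize.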
  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(!src1.is(src2));
    ld(src2, MemOperand(sp, 0 * kPointerSize));
    ld(src1, MemOperand(sp, 1 * kPointerSize));
    Daddu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    ld(src3, MemOperand(sp, 0 * kPointerSize));
    ld(src2, MemOperand(sp, 1 * kPointerSize));
    ld(src1, MemOperand(sp, 2 * kPointerSize));
    Daddu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) {
    Daddu(sp, sp, Operand(count * kPointerSize));
  }

  // Push a fixed frame, consisting of ra, fp.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Push a standard frame, consisting of ra, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  void PopCommonFrame(Register marker_reg = no_reg);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // MIPS64 R2 instruction macros.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);

  void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size);

  void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Neg_s(FPURegister fd, FPURegister fs);
  void Neg_d(FPURegister fd, FPURegister fs);

  // MIPS64 R6 instruction macros.
  void Bovc(Register rt, Register rs, Label* L);
  void Bnvc(Register rt, Register rs, Label* L);

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, FPURegister fs);
  void Cvt_d_uw(FPURegister fd, Register rs);

  // Convert unsigned long to double.
  void Cvt_d_ul(FPURegister fd, FPURegister fs);
  void Cvt_d_ul(FPURegister fd, Register rs);

  // Convert unsigned word to float.
  void Cvt_s_uw(FPURegister fd, FPURegister fs);
  void Cvt_s_uw(FPURegister fd, Register rs);

  // Convert unsigned long to float.
  void Cvt_s_ul(FPURegister fd, FPURegister fs);
  void Cvt_s_ul(FPURegister fd, Register rs);

  // Convert double to unsigned long.
  void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);

  void Trunc_l_d(FPURegister fd, FPURegister fs);
  void Round_l_d(FPURegister fd, FPURegister fs);
  void Floor_l_d(FPURegister fd, FPURegister fs);
  void Ceil_l_d(FPURegister fd, FPURegister fs);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
  // Convert single to unsigned word.
  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned long.
  void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
                  Register result = no_reg);
  void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
                  Register result = no_reg);

  // Convert single to unsigned long.
  void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
                  Register result = no_reg);
  void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
                  Register result = no_reg);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);

  // Preserve the value of a NaN operand.
  void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
                                      FPURegister ft);
  void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
                                      FPURegister ft);

  void Madd_d(FPURegister fd,
              FPURegister fr,
              FPURegister fs,
              FPURegister ft,
              FPURegister scratch);

  // Wrapper functions for the different cmp/branch types.
  inline void BranchF32(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF64(Label* target, Label* nan, Condition cc,
                        FPURegister cmp1, FPURegister cmp2,
                        BranchDelaySlot bd = PROTECT) {
    BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
  }

  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
  inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
                        Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF32(target, nan, cc, cmp1, cmp2, bd);
  }

  // Alias functions for backward compatibility.
  inline void BranchF(Label* target, Label* nan, Condition cc,
                      FPURegister cmp1, FPURegister cmp2,
                      BranchDelaySlot bd = PROTECT) {
    BranchF64(target, nan, cc, cmp1, cmp2, bd);
  }

  inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
                      Condition cc, FPURegister cmp1, FPURegister cmp2) {
    BranchF64(bd, target, nan, cc, cmp1, cmp2);
  }

  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
                       Register result,
                       DoubleRegister double_input,
                       Register scratch,
                       DoubleRegister double_scratch,
                       Register except_flag,
                       CheckForInexactConversion check_inexact
                           = kDontCheckForInexactConversion);
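
  // Example (illustrative sketch): truncate f12 toward zero into v0 and
  // branch to a slow path when the conversion was inexact or out of range;
  // the label and scratch registers are placeholders.
  //   EmitFPUTruncate(kRoundToZero, v0, f12, t0, f13, t1,
  //                   kCheckForInexactConversion);
  //   Branch(&slow, ne, t1, Operand(zero_reg));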
  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if
  // it succeeds, otherwise falls through if the result is saturated. On
  // return, 'result' either holds the answer or is clobbered on fall-through.
  //
  // Only public for the test code in test-code-stubs-mips64.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and
  // 'object' must be different registers. Exits with 'result' holding the
  // answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMA-262 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object'
  // must be different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch,
                         Label* not_int32);

  // Loads the number from object into the dst register.
  // If |object| is neither a smi nor a heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  FPURegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DoubleRegister double_dst,
                               Register heap_number_map,
                               Register scratch1,
                               Register scratch2,
                               FPURegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly
  // represented by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         FPURegister double_scratch0,
                         FPURegister double_scratch1,
                         Label* not_int32);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool restore_context, bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }
  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers function
  // and map can be the same, in which case function is overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Removes the current frame and its arguments from the stack, preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include the
  // receiver. |callee_args_count| is not modified; |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      Register new_target,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);


  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

  // -------------------------------------------------------------------------
  // Debugger Support.

  void DebugBreak();

  // -------------------------------------------------------------------------
  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler
  // chain. Must preserve the result register.
  void PopStackHandler();
  // Initialize fields with filler values. Fields starting at
  // |current_address| up to, but not including, |end_address| are overwritten
  // with the value in |filler|. At the end of the loop, |current_address|
  // takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // -------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get the function prototype of a function and put the value in the
  // result register. Checks that the function really is a function and jumps
  // to the miss label if the fast checks fail. The function register will be
  // untouched; the other registers may be clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  void GetInstanceType(Register object_map, Register object_instance_type) {
    lbu(object_instance_type,
        FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  }

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
  // "branch_to" if the result of the comparison is "cond". If multiple map
  // compares are required, the compare sequence branches to early_success.
  void CompareMapAndBranch(Register obj,
                           Register scratch,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMapAndBranch(Register obj_map,
                           Handle<Map> map,
                           Label* early_success,
                           Condition cond,
                           Label* branch_to);
  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be
  // a heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // If the value is a NaN, canonicalize the value; else, do nothing.
  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);


  // Get the value of the weak cell.
  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the
  // given miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a number object into an FPU double register. If the
  // object is not a number, a jump to the label not_number is performed
  // and the FPU double register is unchanged.
  void ObjectToDoubleFPURegister(
      Register object,
      FPURegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into an FPU double register. The register
  // scratch1 can be the same register as smi, in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleFPURegister(Register smi,
                              FPURegister value,
                              Register scratch1);

  // -------------------------------------------------------------------------
  // Overflow handling functions.
  // Usage: first call the appropriate arithmetic function, then call one of
  // the jump functions with the overflow register as the second parameter.
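
  // Example (illustrative) of the usage described above: smi-tag a0 into t0,
  // with t1 receiving the overflow indicator (negative on overflow), then
  // bail out; SmiTagCheckOverflow and BranchOnOverflow are declared below,
  // and the label name is a placeholder.
  //   SmiTagCheckOverflow(t0, a0, t1);
  //   BranchOnOverflow(&bailout, t1);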

  inline void AddBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void AddBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void AddBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  inline void SubBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void SubBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void SubBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  inline void MulBranchOvf(Register dst, Register left, const Operand& right,
                           Label* overflow_label, Register scratch = at) {
    MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
                             Label* no_overflow_label, Register scratch = at) {
    MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void MulBranchOvf(Register dst, Register left, const Operand& right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  void MulBranchOvf(Register dst, Register left, Register right,
                    Label* overflow_label, Label* no_overflow_label,
                    Register scratch = at);

  inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
                            Label* overflow_label, Register scratch = at) {
    DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void DaddBranchNoOvf(Register dst, Register left,
                              const Operand& right, Label* no_overflow_label,
                              Register scratch = at) {
    DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }

  void DaddBranchOvf(Register dst, Register left, const Operand& right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  void DaddBranchOvf(Register dst, Register left, Register right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
                            Label* overflow_label, Register scratch = at) {
    DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
  }

  inline void DsubBranchNoOvf(Register dst, Register left,
                              const Operand& right, Label* no_overflow_label,
                              Register scratch = at) {
    DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
  }
  void DsubBranchOvf(Register dst, Register left, const Operand& right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  void DsubBranchOvf(Register dst, Register left, Register right,
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);

  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }

  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }

  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }

  // -------------------------------------------------------------------------
  // Runtime calls.

  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }

  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, Operand(ref));
  }

#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
                  const Operand& rt = Operand(zero_reg), \
                  BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  void CallJSExitStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT);
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles, bd);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs,
                   BranchDelaySlot bd = PROTECT) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles, bd);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments,
                             BranchDelaySlot bd = PROTECT);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);
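
  // Example (illustrative): tail-call a zero-argument runtime entry; the
  // choice of Runtime::kStackGuard is only for demonstration.
  //   TailCallRuntime(Runtime::kStackGuard);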
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on the
  // stack and add space for the four MIPS argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument slots, using the helper
  // CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored on the stack using the following pattern:
  //  sw(a4, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
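  // A minimal illustrative sequence, assuming 'ref' is an ExternalReference
  // to a C function taking two word-sized arguments (register choices are
  // arbitrary):
  //
  //   PrepareCallCFunction(2, t0);  // Two register arguments; t0 is scratch.
  //   // ... place the arguments in a0 and a1 ...
  //   CallCFunction(ref, 2);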
  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether the soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT,
                               bool builtin_exit_frame = false);

  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register
  // is unchanged and at gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // -------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities.

  // Check whether the value of reg is a power of two and not zero. If not,
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);

  // -------------------------------------------------------------------------
  // Smi utilities.

  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  void SmiTag(Register dst, Register src) {
    STATIC_ASSERT(kSmiTag == 0);
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      dsll32(dst, src, 0);
    } else {
      Addu(dst, src, src);
    }
  }

  void SmiTag(Register reg) {
    SmiTag(reg, reg);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
    TrySmiTag(reg, reg, scratch, not_a_smi);
  }

  void TrySmiTag(Register dst,
                 Register src,
                 Register scratch,
                 Label* not_a_smi) {
    if (SmiValuesAre32Bits()) {
      SmiTag(dst, src);
    } else {
      SmiTagCheckOverflow(at, src, scratch);
      BranchOnOverflow(not_a_smi, scratch);
      mov(dst, at);
    }
  }

  void SmiUntag(Register dst, Register src) {
    if (SmiValuesAre32Bits()) {
      STATIC_ASSERT(kSmiShift == 32);
      dsra32(dst, src, 0);
    } else {
      sra(dst, src, kSmiTagSize);
    }
  }

  void SmiUntag(Register reg) {
    SmiUntag(reg, reg);
  }
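  // Illustrative round trip: with 32-bit smi values the payload lives in the
  // upper word, so tagging and untagging are single shifts.
  //
  //   SmiTag(v0, a0);    // v0 = a0 << 32 when SmiValuesAre32Bits().
  //   SmiUntag(a1, v0);  // a1 = the original int32 value again.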
  // Left-shifted from int32 equivalent of Smi.
  void SmiScale(Register dst, Register src, int scale) {
    if (SmiValuesAre32Bits()) {
      // The int portion is in the upper 32 bits of the 64-bit word.
      dsra(dst, src, kSmiShift - scale);
    } else {
      DCHECK(scale >= kSmiTagSize);
      sll(dst, src, scale - kSmiTagSize);
    }
  }

  // Combine load with untagging or scaling.
  void SmiLoadUntag(Register dst, MemOperand src);

  void SmiLoadScale(Register dst, MemOperand src, int scale);

  // Returns 2 values: the Smi and a scaled version of the int within the Smi.
  void SmiLoadWithScale(Register d_smi,
                        Register d_scaled,
                        MemOperand src,
                        int scale);

  // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
  void SmiLoadUntagWithScale(Register d_int,
                             Register d_scaled,
                             MemOperand src,
                             int scale);


  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value,
                 Label* smi_label,
                 Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
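  // Illustrative guard, assuming 'not_smi' is a caller-owned label:
  //
  //   Label not_smi;
  //   JumpIfNotSmi(a0, &not_smi);  // Uses at as the default scratch.
  //   SmiUntag(a0);                // Only reached when a0 holds a smi.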
  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities.

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // -------------------------------------------------------------------------
  // String utilities.

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type,
      Register second_object_instance_type, Register scratch1,
      Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
                                                    Register second,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift;
    dsrl(dst, src, shift);
    And(dst, dst, Operand(mask));
    dsll32(dst, dst, 0);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    // Delegate to the two-register form; delegating to DecodeField here
    // would leave the result untagged instead of producing a smi.
    DecodeFieldToSmi<Field>(reg, reg);
  }
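  // Illustrative use with a BitField-style type providing kShift, kSize and
  // kMask; Map::ElementsKindBits is one such field in this code base:
  //
  //   DecodeField<Map::ElementsKindBits>(t0, t1);  // t0 = decoded field of t1.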
  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue(bool code_pre_aging);

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in a0 and returns map with validated enum cache
  // in a0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated AllocationMemento
  // object that can be checked for in order to pretransition to another type.
  // On entry, receiver_reg should point to the array object. scratch_reg gets
  // clobbered. If no info is present, jump to no_memento_found; otherwise fall
  // through.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    Branch(memento_found);
    bind(&no_memento_found);
  }
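  // Illustrative use, assuming a0 points at the array under test (t0 is
  // clobbered as scratch):
  //
  //   Label has_memento;
  //   JumpIfJSArrayHasAllocationMemento(a0, t0, &has_memento);
  //   // Fall-through: no memento, so no pretransition is needed.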
  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
  void BranchShortHelperR6(int32_t offset, Label* L);
  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
                           Register rs, const Operand& rt);
  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
                         const Operand& rt, BranchDelaySlot bdslot);
  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
                        const Operand& rt, BranchDelaySlot bdslot);

  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
  void BranchAndLinkShortHelper(int16_t offset, Label* L,
                                BranchDelaySlot bdslot);
  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
                                  Register rs, const Operand& rt);
  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt,
                                BranchDelaySlot bdslot);
  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                               Register rs, const Operand& rt,
                               BranchDelaySlot bdslot);
  void BranchLong(Label* L, BranchDelaySlot bdslot);
  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);

  // Common implementation of BranchF functions for the different formats.
  void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2,
                     BranchDelaySlot bd = PROTECT);

  void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
                    FPURegister cmp1, FPURegister cmp2,
                    BranchDelaySlot bd = PROTECT);


  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // ne for new space, eq otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the bitmap
  // register points at the word with the mark bits, and the mask register
  // holds the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  bool has_double_zero_reg_set_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Change the condition part of an instruction, leaving the rest of the
  // current instruction unchanged.
  void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Expected patch size in bytes.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the instruction cache after
                             // patching.
};

template <typename Func>
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                         Func GetLabelFunction) {
  // Ensure that dd-ed labels following this instruction use 8-byte aligned
  // addresses.
  if (kArchVariant >= kMips64r6) {
    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
    // Opposite of Align(8) as we have an odd number of instructions in this
    // case.
    if ((pc_offset() & 7) == 0) {
      nop();
    }
    addiupc(at, 5);
    Dlsa(at, at, index, kPointerSizeLog2);
    ld(at, MemOperand(at));
  } else {
    Label here;
    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
    Align(8);
    push(ra);
    bal(&here);
    dsll(at, index, kPointerSizeLog2);  // Branch delay slot.
    bind(&here);
    daddu(at, at, ra);
    pop(ra);
    ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
  }
  jr(at);
  nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    dd(GetLabelFunction(index));
  }
}

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_