1 // Copyright 2012 the V8 project authors. All rights reserved. 2 // Redistribution and use in source and binary forms, with or without 3 // modification, are permitted provided that the following conditions are 4 // met: 5 // 6 // * Redistributions of source code must retain the above copyright 7 // notice, this list of conditions and the following disclaimer. 8 // * Redistributions in binary form must reproduce the above 9 // copyright notice, this list of conditions and the following 10 // disclaimer in the documentation and/or other materials provided 11 // with the distribution. 12 // * Neither the name of Google Inc. nor the names of its 13 // contributors may be used to endorse or promote products derived 14 // from this software without specific prior written permission. 15 // 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "assembler.h"
#include "frames.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
// kHeapObjectTag is subtracted because tagged HeapObject pointers point
// one byte past the real object start.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Give alias names to registers used by the JS calling convention.
const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.

// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag
  DONT_TAG_RESULT
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


// Returns a register that is guaranteed to differ from all the (up to six)
// registers passed in.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
// Debug-only helper: returns true if any two of the given registers alias.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg);
#endif


enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSize(Register target, Condition cond = al);
  void Call(Register target, Condition cond = al);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);

  // Combined drop-and-return: discards 'drop' elements, then returns.
  void Ret(int drop, Condition cond = al);

  // Swap two registers.  If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1,
            Register reg2,
            Register scratch = no_reg,
            Condition cond = al);


  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  // Unsigned bit-field extract: dst = src[lsb .. lsb+width-1], zero-extended.
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // Signed bit-field extract: as Ubfx but sign-extended.
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  void Bfi(Register dst,
           Register src,
           Register scratch,
           int lsb,
           int width,
           Condition cond = al);
  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
  // Unsigned saturate: clamp src to the range [0, 2^satpos - 1].
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);

  void Call(Label* target);
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DwVfpRegister dst, DwVfpRegister src);

  // Load/store honouring the given value representation (e.g. smi, integer).
  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object.
  // If the object is already grey or black
  // then we just fall through, since it is already live.  If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through.  For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // As above, but the offset has the tag presubtracted.  For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK) {
    // Re-add the tag so RecordWriteField's untagging yields the raw offset.
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check);
  }

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written.  |value| is the object being stored.  The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));
    if (src1.code() > src2.code()) {
      // stm stores the lower-numbered register at the lower address, which
      // only matches the required order when src1's code is the larger one.
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    // Use a single stm when register codes happen to be in descending order;
    // otherwise fall back to individual stores / a smaller Push.
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1,
            Register src2,
            Register src3,
            Register src4,
            Condition cond = al) {
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    ASSERT(!src1.is(src4));
    ASSERT(!src2.is(src4));
    ASSERT(!src3.is(src4));
    // As above: prefer one stm for the longest descending-code prefix.
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Pop two registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
379 void Pop(Register src1, Register src2, Register src3, Condition cond = al) { 380 ASSERT(!src1.is(src2)); 381 ASSERT(!src2.is(src3)); 382 ASSERT(!src1.is(src3)); 383 if (src1.code() > src2.code()) { 384 if (src2.code() > src3.code()) { 385 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); 386 } else { 387 ldr(src3, MemOperand(sp, 4, PostIndex), cond); 388 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); 389 } 390 } else { 391 Pop(src2, src3, cond); 392 str(src1, MemOperand(sp, 4, PostIndex), cond); 393 } 394 } 395 396 // Pop four registers. Pops rightmost register first (from lower address). 397 void Pop(Register src1, 398 Register src2, 399 Register src3, 400 Register src4, 401 Condition cond = al) { 402 ASSERT(!src1.is(src2)); 403 ASSERT(!src2.is(src3)); 404 ASSERT(!src1.is(src3)); 405 ASSERT(!src1.is(src4)); 406 ASSERT(!src2.is(src4)); 407 ASSERT(!src3.is(src4)); 408 if (src1.code() > src2.code()) { 409 if (src2.code() > src3.code()) { 410 if (src3.code() > src4.code()) { 411 ldm(ia_w, 412 sp, 413 src1.bit() | src2.bit() | src3.bit() | src4.bit(), 414 cond); 415 } else { 416 ldr(src4, MemOperand(sp, 4, PostIndex), cond); 417 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); 418 } 419 } else { 420 Pop(src3, src4, cond); 421 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); 422 } 423 } else { 424 Pop(src2, src3, src4, cond); 425 ldr(src1, MemOperand(sp, 4, PostIndex), cond); 426 } 427 } 428 429 // Push and pop the registers that can hold pointers, as defined by the 430 // RegList constant kSafepointSavedRegisters. 431 void PushSafepointRegisters(); 432 void PopSafepointRegisters(); 433 void PushSafepointRegistersAndDoubles(); 434 void PopSafepointRegistersAndDoubles(); 435 // Store value in register src in the safepoint stack slot for 436 // register dst. 
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
            Register dst2,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            Register src2,
            const MemOperand& dst,
            Condition cond = al);

  // Ensure that FPSCR contains values needed by JavaScript.
  // We need the NaNModeControlBit to be sure that operations like
  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
  // In VFP3 it will be always the Canonical NaN.
  // In VFP2 it will be either the Canonical NaN or the negative version
  // of the Canonical NaN. It doesn't matter if we have two values. The aim
  // is to be sure to never generate the hole NaN.
  void VFPEnsureFPSCRState(Register scratch);

  // If the value is a NaN, canonicalize the value else, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst,
                          const DwVfpRegister src,
                          const Condition cond = al);
  // In-place variant: canonicalizes |value| into itself.
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const double src2,
                             const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Load an immediate double value into a VFP double register.
  void Vmov(const DwVfpRegister dst,
            const double imm,
            const Register scratch = no_reg);

  // Move between a core register and the high/low 32-bit half of a VFP
  // double register.
  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  LowDwVfpRegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DwVfpRegister double_dst,
                               Register heap_number_map,
                               Register scratch,
                               LowDwVfpRegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch,
                         DwVfpRegister double_scratch0,
                         LowDwVfpRegister double_scratch1,
                         Label* not_int32);

  // Generates function and stub prologue code.
  void Prologue(PrologueFrameMode frame_mode);

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame. Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles,
                      Register argument_count,
                      bool restore_context);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Walk context_chain_length links up the context chain into dst.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the initial map for new Arrays from a JSFunction.
  void LoadInitialArrayMap(Register function_in,
                           Register scratch,
                           Register map_out,
                           bool can_have_holes);

  void LoadGlobalFunction(int index, Register function);
  void LoadArrayFunction(Register function);

  // Load the initial map from the global function.  The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  // Point kRootRegister at the start of the isolate's roots array.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Set up call kind marking in the dst register.  The method takes the
  // register as an explicit first parameter to make the code more readable
  // at the call sites.
  void SetCallKind(Register dst, CallKind kind);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  CallKind call_kind);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  // Jump to |fail| unless heap_object is a JSObject-or-larger instance type.
  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  void IsInstanceJSObjectType(Register map,
                              Register scratch,
                              Label* fail);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();
#endif

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // Passes thrown value to the handler of top of the try handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain.
  void ThrowUncatchable(Register value);

  // Throw a message string as an exception.
  void Throw(BailoutReason reason);

  // Throw a message string as an exception if a condition is not true.
  void ThrowIf(Condition cc, BailoutReason reason);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register t0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register t0,
                                Register t1,
                                Register t2);


  // Emit a marker nop of the given type (see IsMarkedCode below).
  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
  // These instructions are generated to mark special location in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    // Decode the mov instruction encoding: destination register in bits
    // 12..15, source register in bits 0..3.
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
               (dst_reg == src_reg) &&
               (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    ASSERT((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old pointer space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);


  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register length,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateAsciiSlicedString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          TaggingMode tagging_mode = TAG_RESULT);
  void AllocateHeapNumberWithValue(Register result,
                                   DwVfpRegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst,
                  Register src,
                  LowDwVfpRegister double_scratch,
                  int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values.  Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|.  At
  // the end the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare object type for heap object.  Branch to false_label if type
  // is lower than min_type or greater than max_type.
  // Load map into the register map.
  void CheckObjectTypeRange(Register heap_object,
                            Register map,
                            InstanceType min_type,
                            InstanceType max_type,
                            Label* false_label);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);


  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   LowDwVfpRegister double_scratch,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequences branches to early_success.
911 void CompareMap(Register obj, 912 Register scratch, 913 Handle<Map> map, 914 Label* early_success); 915 916 // As above, but the map of the object is already loaded into the register 917 // which is preserved by the code generated. 918 void CompareMap(Register obj_map, 919 Handle<Map> map, 920 Label* early_success); 921 922 // Check if the map of an object is equal to a specified map and branch to 923 // label if not. Skip the smi check if not required (object is known to be a 924 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match 925 // against maps that are ElementsKind transition maps of the specified map. 926 void CheckMap(Register obj, 927 Register scratch, 928 Handle<Map> map, 929 Label* fail, 930 SmiCheckType smi_check_type); 931 932 933 void CheckMap(Register obj, 934 Register scratch, 935 Heap::RootListIndex index, 936 Label* fail, 937 SmiCheckType smi_check_type); 938 939 940 // Check if the map of an object is equal to a specified map and branch to a 941 // specified target if equal. Skip the smi check if not required (object is 942 // known to be a heap object) 943 void DispatchMap(Register obj, 944 Register scratch, 945 Handle<Map> map, 946 Handle<Code> success, 947 SmiCheckType smi_check_type); 948 949 950 // Compare the object in a register to a value from the root list. 951 // Uses the ip register as scratch. 952 void CompareRoot(Register obj, Heap::RootListIndex index); 953 954 955 // Load and check the instance type of an object for being a string. 956 // Loads the type into the second argument register. 957 // Returns a condition that will be enabled if the object was a string 958 // and the passed-in condition passed. If the passed-in condition failed 959 // then flags remain unchanged. 
960 Condition IsObjectStringType(Register obj, 961 Register type, 962 Condition cond = al) { 963 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); 964 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); 965 tst(type, Operand(kIsNotStringMask), cond); 966 ASSERT_EQ(0, kStringTag); 967 return eq; 968 } 969 970 971 // Generates code for reporting that an illegal operation has 972 // occurred. 973 void IllegalOperation(int num_arguments); 974 975 // Picks out an array index from the hash field. 976 // Register use: 977 // hash - holds the index's hash. Clobbered. 978 // index - holds the overwritten index on exit. 979 void IndexFromHash(Register hash, Register index); 980 981 // Get the number of least significant bits from a register 982 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 983 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); 984 985 // Load the value of a smi object into a double register. 986 // The register value must be between d0 and d15. 987 void SmiToDouble(LowDwVfpRegister value, Register smi); 988 989 // Check if a double can be exactly represented as a signed 32-bit integer. 990 // Z flag set to one if true. 991 void TestDoubleIsInt32(DwVfpRegister double_input, 992 LowDwVfpRegister double_scratch); 993 994 // Try to convert a double to a signed 32-bit integer. 995 // Z flag set to one and result assigned if the conversion is exact. 996 void TryDoubleToInt32Exact(Register result, 997 DwVfpRegister double_input, 998 LowDwVfpRegister double_scratch); 999 1000 // Floor a double and writes the value to the result register. 1001 // Go to exact if the conversion is exact (to be able to test -0), 1002 // fall through calling code if an overflow occurred, else go to done. 1003 // In return, input_high is loaded with high bits of input. 
  void TryInt32Floor(Register result,
                     DwVfpRegister double_input,
                     Register input_high,
                     LowDwVfpRegister double_scratch,
                     Label* done,
                     Label* exact);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations.  See ECMA-262 9.5: ToInt32.  Goes to 'done'
  // if it succeeds, otherwise falls through if result is saturated.  On
  // return 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations.  See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DwVfpRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations.  See ECMA-262 9.5: ToInt32.  'result' and
  // 'object' must be different registers.  Exits with 'result' holding the
  // answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1.  'object' and 'result'
  // must be different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch1,
                         Label* not_int32);

  // Check whether d16-d31 are available on the CPU.  The result is given by
  // the Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  // Does a runtime check for 16/32 FP registers.  Either way, pushes 32
  // double values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers.  Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Call a code stub.
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: call the runtime routine identified by id,
  // preserving the VFP double registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on
  // stack.  After aligning the frame, non-register arguments must be stored
  // in sp[0], sp[4], etc., not pushed.  The argument count assumes all
  // arguments are word sized.  If double arguments are used, this function
  // assumes that all double arguments are stored before core registers;
  // otherwise the correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic.  This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used.  These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void SetCallCDoubleArguments(DwVfpRegister dreg);
  void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
  void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction.  The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  void GetCFunctionDoubleResult(const DwVfpRegister dst);

  // Calls an API function.  Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions.  Restores context.  stack_space
  // - space to be unwound on exit (includes the call JS arguments space and
  // the additional space allocated for the fast call).
  void CallApiFunctionAndReturn(ExternalReference function,
                                Address function_address,
                                ExternalReference thunk_ref,
                                Register thunk_last_arg,
                                int stack_space,
                                MemOperand return_value_operand,
                                MemOperand* context_restore_operand);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function.  Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // setup the function in r1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Returns the code object associated with this macro assembler; must not
  // be requested before it has been installed.
  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    // Compiled for ARM hardware: query the OS at run time.
    return OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    // Otherwise the ABI choice is fixed at compile time.
    return true;
#else
    return false;
#endif
  }

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero.  If not
  // control continues at the label not_power_of_two.  If reg is a power of
  // two the register scratch contains the value of (reg - 1) when control
  // falls through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg
  // is zero or negative, or jumps to the 'not_power_of_two' label if the
  // value is strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                       Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Tag the value as a smi.  Adding a register to itself is a one-bit left
  // shift, which is the smi tagging operation.
  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }

  // Try to convert int32 to smi.  If the value is too large, preserve
  // the original value and jump to not_a_smi.  Clobbers ip and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    // Tag into ip first so src survives if the tagging add overflows (vs).
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);
    mov(reg, ip);
  }


  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (Z == 1, i.e. condition eq, if true;
  // the smi tag bits are zero so the tst result is zero for a smi).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));
  }
  // As above, but also tests the sign bit, so eq holds only for a
  // non-negative smi.
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }
  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Generate code to do a lookup in the number string cache.  If the number
  // in the register object is found in the cache the generated code falls
  // through with the result in the result register.  The object and the
  // result register can be the same.  If the number is not found in the
  // cache the code jumps to the label not_found with only the content of
  // register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Checks if both objects are sequential ASCII strings and jumps to label
  // if either is not.  Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
                                                  Register object2,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* failure);

  // Checks if both objects are sequential ASCII strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialAsciiStrings(Register first,
                                           Register second,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* not_flat_ascii_strings);

  // Checks if both instance types are sequential ASCII strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);

  // Check if instance type is sequential ASCII string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                              Register scratch,
                                              Label* failure);

  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 uint32_t encoding_mask);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  // Get the location of a relocated constant (its address in the constant
  // pool) from its load site.
  void GetRelocatedValueLocation(Register ldr_location,
                                 Register result);


  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DwVfpRegister input_reg,
                          LowDwVfpRegister double_scratch);


  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  // Extract the bit field described by Field from reg, in place.
  // NOTE(review): the mask is pre-shifted left by kSmiTagSize, so the
  // extracted value appears to be kept smi-tagged — confirm against the
  // Field layouts used by callers.
  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = (Field::kMask >> shift) << kSmiTagSize;
    mov(reg, Operand(reg, LSR, shift));
    and_(reg, reg, Operand(mask));
  }

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in r0 and returns map with validated enum cache
  // in r0.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

  // AllocationMemento support.  Arrays may have an associated
  // AllocationMemento object that can be checked for in order to
  // pretransition to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  // Convenience wrapper around TestJSArrayForAllocationMemento: jumps to
  // memento_found when the memento is present, falls through otherwise.
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    b(eq, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.
  // Afterwards, the bitmap register points at the word with the mark bits
  // and the mask register the position of the first bit.  Leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions.  Compute a handler address and jump to
  // it.  See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;  // See set_generating_stub().
  bool has_frame_;        // See set_has_frame().
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation.  When using the code patcher
// the exact number of bytes specified must be emitted.  It is not legal to
// emit relocation information.  If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(byte* address,
              int instructions,
              FlushICache flush_cache = FLUSH);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the
  // current instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate a MemOperand addressing the slot at the given index of a context.
inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


// Generate a MemOperand addressing the global object slot of the current
// context (held in cp).
inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// In coverage builds, emit a file:line-tagged stop before each macro call so
// executed macros can be observed.
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_