// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r0};
const Register kReturnRegister1 = {Register::kCode_r1};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object. |offset| is the
// offset from the start of the (untagged) object; the heap-object tag is
// subtracted here so the operand addresses the raw memory location.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Give alias names to registers
const Register cp = {Register::kCode_r7};  // JavaScript context pointer.
const Register pp = {Register::kCode_r8};  // Constant pool pointer.
const Register kRootRegister = {Register::kCode_r10};  // Roots array pointer.

// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag
  DONT_TAG_RESULT
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


// Returns a register that is distinct from every one of the (up to six)
// given registers; unused slots default to no_reg.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
// Returns true if any two of the given (valid) registers are the same.
// Only available in debug builds; used from DCHECKs.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif


enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);


  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target, Condition cond = al);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallStubSize(CodeStub* stub,
                   TypeFeedbackId ast_id = TypeFeedbackId::None(),
                   Condition cond = al);
  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
                                            Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);

  // Return and drop |drop| stack slots in one pseudo instruction.
  void Ret(int drop, Condition cond = al);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using eor (exclusive or) instead of mov is emitted.
  void Swap(Register reg1,
            Register reg2,
            Register scratch = no_reg,
            Condition cond = al);

  // Multiply-and-subtract (ARM MLS): dst = srcA - (src1 * src2).
  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  // Unsigned/signed bit-field extract of |width| bits starting at bit |lsb|.
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  void Bfi(Register dst,
           Register src,
           Register scratch,
           int lsb,
           int width,
           Condition cond = al);
  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);

  void Call(Label* target);
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  // Elides the mov entirely when it would be a no-op: src is dst itself and
  // no flag update (SBit) was requested.
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  void Move(DwVfpRegister dst, DwVfpRegister src);

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    // Re-add the tag so we can delegate to the field variant.
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  // A single stm is only legal when the register numbers are descending,
  // because stm stores the lowest-numbered register at the lowest address;
  // otherwise fall back to individual str instructions.
  void Push(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1,
            Register src2,
            Register src3,
            Register src4,
            Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4, src5));
    // Greedily batch a descending-numbered prefix into one stm, delegating
    // the remainder to the smaller Push overloads.
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp,
                src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
                cond);
          } else {
            stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
                cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1,
           Register src2,
           Register src3,
           Register src4,
           Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Push a fixed frame, consisting of lr, fp, constant pool (if
  // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
  // marker_reg is a valid register.
  void PushFixedFrame(Register marker_reg = no_reg);
  void PopFixedFrame(Register marker_reg = no_reg);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
            Register dst2,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            Register src2,
            const MemOperand& dst,
            Condition cond = al);

  // Ensure that FPSCR contains values needed by JavaScript.
  // We need the NaNModeControlBit to be sure that operations like
  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
  // In VFP3 it will be always the Canonical NaN.
  // In VFP2 it will be either the Canonical NaN or the negative version
  // of the Canonical NaN. It doesn't matter if we have two values. The aim
  // is to be sure to never generate the hole NaN.
  void VFPEnsureFPSCRState(Register scratch);

  // If the value is a NaN, canonicalize the value else, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst,
                          const DwVfpRegister src,
                          const Condition cond = al);
  // In-place variant: canonicalizes |value| into itself.
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
                             const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const double src2,
                             const Condition cond = al);

  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
                              const SwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Load the immediate double |imm| into a VFP register.
  // NOTE(review): |scratch| is presumably only needed when the value cannot
  // be materialized directly — confirm against the implementation.
  void Vmov(const DwVfpRegister dst,
            const double imm,
            const Register scratch = no_reg);

  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  LowDwVfpRegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DwVfpRegister double_dst,
                               Register heap_number_map,
                               Register scratch,
                               LowDwVfpRegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch,
                         DwVfpRegister double_scratch0,
                         LowDwVfpRegister double_scratch1,
                         Label* not_int32);

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame. Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool restore_context,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  // Point kRootRegister at the start of the roots array.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      Register new_target,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register t0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register t0,
                                Register t1,
                                Register t2);


  // Emit a marker nop of the given type (encoded as mov r<type>, r<type>).
  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
  // These instructions are generated to mark special location in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  // Decode a marker instruction: returns its marker type <n> if |instr| is a
  // mov rn, rn with n in the marker range, otherwise -1.
  static inline int GetCodeMarker(Instr instr) {
    int dst_reg_offset = 12;  // Rd field of the mov encoding (bits 15:12).
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;  // Rm field (bits 3:0).
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    // Condition 'al' plus the MOV data-processing opcode (0b1101 at bit 21).
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
               (dst_reg == src_reg) &&
               (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   DwVfpRegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|. At
  // the end of the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);


  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
880 void CheckFastSmiElements(Register map, 881 Register scratch, 882 Label* fail); 883 884 // Check to see if maybe_number can be stored as a double in 885 // FastDoubleElements. If it can, store it at the index specified by key in 886 // the FastDoubleElements array elements. Otherwise jump to fail. 887 void StoreNumberToDoubleElements(Register value_reg, 888 Register key_reg, 889 Register elements_reg, 890 Register scratch1, 891 LowDwVfpRegister double_scratch, 892 Label* fail, 893 int elements_offset = 0); 894 895 // Compare an object's map with the specified map and its transitioned 896 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are 897 // set with result of map compare. If multiple map compares are required, the 898 // compare sequences branches to early_success. 899 void CompareMap(Register obj, 900 Register scratch, 901 Handle<Map> map, 902 Label* early_success); 903 904 // As above, but the map of the object is already loaded into the register 905 // which is preserved by the code generated. 906 void CompareMap(Register obj_map, 907 Handle<Map> map, 908 Label* early_success); 909 910 // Check if the map of an object is equal to a specified map and branch to 911 // label if not. Skip the smi check if not required (object is known to be a 912 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match 913 // against maps that are ElementsKind transition maps of the specified map. 914 void CheckMap(Register obj, 915 Register scratch, 916 Handle<Map> map, 917 Label* fail, 918 SmiCheckType smi_check_type); 919 920 921 void CheckMap(Register obj, 922 Register scratch, 923 Heap::RootListIndex index, 924 Label* fail, 925 SmiCheckType smi_check_type); 926 927 928 // Check if the map of an object is equal to a specified weak map and branch 929 // to a specified target if equal. 
Skip the smi check if not required 930 // (object is known to be a heap object) 931 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2, 932 Handle<WeakCell> cell, Handle<Code> success, 933 SmiCheckType smi_check_type); 934 935 // Compare the given value and the value of weak cell. 936 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch); 937 938 void GetWeakValue(Register value, Handle<WeakCell> cell); 939 940 // Load the value of the weak cell in the value register. Branch to the given 941 // miss label if the weak cell was cleared. 942 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss); 943 944 // Compare the object in a register to a value from the root list. 945 // Uses the ip register as scratch. 946 void CompareRoot(Register obj, Heap::RootListIndex index); 947 void PushRoot(Heap::RootListIndex index) { 948 LoadRoot(ip, index); 949 Push(ip); 950 } 951 952 // Compare the object in a register to a value and jump if they are equal. 953 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) { 954 CompareRoot(with, index); 955 b(eq, if_equal); 956 } 957 958 // Compare the object in a register to a value and jump if they are not equal. 959 void JumpIfNotRoot(Register with, Heap::RootListIndex index, 960 Label* if_not_equal) { 961 CompareRoot(with, index); 962 b(ne, if_not_equal); 963 } 964 965 // Load and check the instance type of an object for being a string. 966 // Loads the type into the second argument register. 967 // Returns a condition that will be enabled if the object was a string 968 // and the passed-in condition passed. If the passed-in condition failed 969 // then flags remain unchanged. 
970 Condition IsObjectStringType(Register obj, 971 Register type, 972 Condition cond = al) { 973 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); 974 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); 975 tst(type, Operand(kIsNotStringMask), cond); 976 DCHECK_EQ(0u, kStringTag); 977 return eq; 978 } 979 980 981 // Picks out an array index from the hash field. 982 // Register use: 983 // hash - holds the index's hash. Clobbered. 984 // index - holds the overwritten index on exit. 985 void IndexFromHash(Register hash, Register index); 986 987 // Get the number of least significant bits from a register 988 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 989 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); 990 991 // Load the value of a smi object into a double register. 992 // The register value must be between d0 and d15. 993 void SmiToDouble(LowDwVfpRegister value, Register smi); 994 995 // Check if a double can be exactly represented as a signed 32-bit integer. 996 // Z flag set to one if true. 997 void TestDoubleIsInt32(DwVfpRegister double_input, 998 LowDwVfpRegister double_scratch); 999 1000 // Try to convert a double to a signed 32-bit integer. 1001 // Z flag set to one and result assigned if the conversion is exact. 1002 void TryDoubleToInt32Exact(Register result, 1003 DwVfpRegister double_input, 1004 LowDwVfpRegister double_scratch); 1005 1006 // Floor a double and writes the value to the result register. 1007 // Go to exact if the conversion is exact (to be able to test -0), 1008 // fall through calling code if an overflow occurred, else go to done. 1009 // In return, input_high is loaded with high bits of input. 
1010 void TryInt32Floor(Register result, 1011 DwVfpRegister double_input, 1012 Register input_high, 1013 LowDwVfpRegister double_scratch, 1014 Label* done, 1015 Label* exact); 1016 1017 // Performs a truncating conversion of a floating point number as used by 1018 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it 1019 // succeeds, otherwise falls through if result is saturated. On return 1020 // 'result' either holds answer, or is clobbered on fall through. 1021 // 1022 // Only public for the test code in test-code-stubs-arm.cc. 1023 void TryInlineTruncateDoubleToI(Register result, 1024 DwVfpRegister input, 1025 Label* done); 1026 1027 // Performs a truncating conversion of a floating point number as used by 1028 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 1029 // Exits with 'result' holding the answer. 1030 void TruncateDoubleToI(Register result, DwVfpRegister double_input); 1031 1032 // Performs a truncating conversion of a heap number as used by 1033 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' 1034 // must be different registers. Exits with 'result' holding the answer. 1035 void TruncateHeapNumberToI(Register result, Register object); 1036 1037 // Converts the smi or heap number in object to an int32 using the rules 1038 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated 1039 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be 1040 // different registers. 1041 void TruncateNumberToI(Register object, 1042 Register result, 1043 Register heap_number_map, 1044 Register scratch1, 1045 Label* not_int32); 1046 1047 // Check whether d16-d31 are available on the CPU. The result is given by the 1048 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. 1049 void CheckFor32DRegs(Register scratch); 1050 1051 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double 1052 // values to location, saving [d0..(d15|d31)]. 
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Call a code stub.
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience wrapper: call the runtime function |fid| with FP registers
  // saved across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Compute how many words are passed on the stack for a C call with the
  // given register/double-register argument counts.
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Counterparts of MovToFloat* above, for reading C-call results back.
  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Returns the code object for the current MacroAssembler; the handle is
  // patched with the actual code object on installation.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and ip gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);

  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    // Running on real ARM hardware: probe the OS for the float ABI.
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                       Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Tag as a smi in place: reg = reg + reg, i.e. a left shift by one
  // (kSmiTagSize == 1 on ARM). SetCC makes overflow visible via the V flag.
  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    // Tag into ip first so src survives intact if tagging overflows.
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);
    mov(reg, ip);
  }

  // Remove the smi tag (arithmetic shift right by kSmiTagSize).
  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (Z is set, i.e. eq, if true).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));
  }
  // Test if the register contains a non-negative smi.
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }
  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
1307 void AssertIsRoot(Register reg, Heap::RootListIndex index); 1308 1309 // --------------------------------------------------------------------------- 1310 // HeapNumber utilities 1311 1312 void JumpIfNotHeapNumber(Register object, 1313 Register heap_number_map, 1314 Register scratch, 1315 Label* on_not_heap_number); 1316 1317 // --------------------------------------------------------------------------- 1318 // String utilities 1319 1320 // Checks if both objects are sequential one-byte strings and jumps to label 1321 // if either is not. Assumes that neither object is a smi. 1322 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1, 1323 Register object2, 1324 Register scratch1, 1325 Register scratch2, 1326 Label* failure); 1327 1328 // Checks if both objects are sequential one-byte strings and jumps to label 1329 // if either is not. 1330 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second, 1331 Register scratch1, 1332 Register scratch2, 1333 Label* not_flat_one_byte_strings); 1334 1335 // Checks if both instance types are sequential one-byte strings and jumps to 1336 // label if either is not. 1337 void JumpIfBothInstanceTypesAreNotSequentialOneByte( 1338 Register first_object_instance_type, Register second_object_instance_type, 1339 Register scratch1, Register scratch2, Label* failure); 1340 1341 // Check if instance type is sequential one-byte string and jump to label if 1342 // it is not. 
1343 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, 1344 Label* failure); 1345 1346 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); 1347 1348 void EmitSeqStringSetCharCheck(Register string, 1349 Register index, 1350 Register value, 1351 uint32_t encoding_mask); 1352 1353 1354 void ClampUint8(Register output_reg, Register input_reg); 1355 1356 void ClampDoubleToUint8(Register result_reg, 1357 DwVfpRegister input_reg, 1358 LowDwVfpRegister double_scratch); 1359 1360 1361 void LoadInstanceDescriptors(Register map, Register descriptors); 1362 void EnumLength(Register dst, Register map); 1363 void NumberOfOwnDescriptors(Register dst, Register map); 1364 void LoadAccessor(Register dst, Register holder, int accessor_index, 1365 AccessorComponent accessor); 1366 1367 template<typename Field> 1368 void DecodeField(Register dst, Register src) { 1369 Ubfx(dst, src, Field::kShift, Field::kSize); 1370 } 1371 1372 template<typename Field> 1373 void DecodeField(Register reg) { 1374 DecodeField<Field>(reg, reg); 1375 } 1376 1377 template<typename Field> 1378 void DecodeFieldToSmi(Register dst, Register src) { 1379 static const int shift = Field::kShift; 1380 static const int mask = Field::kMask >> shift << kSmiTagSize; 1381 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); 1382 STATIC_ASSERT(kSmiTag == 0); 1383 if (shift < kSmiTagSize) { 1384 mov(dst, Operand(src, LSL, kSmiTagSize - shift)); 1385 and_(dst, dst, Operand(mask)); 1386 } else if (shift > kSmiTagSize) { 1387 mov(dst, Operand(src, LSR, shift - kSmiTagSize)); 1388 and_(dst, dst, Operand(mask)); 1389 } else { 1390 and_(dst, src, Operand(mask)); 1391 } 1392 } 1393 1394 template<typename Field> 1395 void DecodeFieldToSmi(Register reg) { 1396 DecodeField<Field>(reg, reg); 1397 } 1398 1399 // Load the type feedback vector from a JavaScript frame. 1400 void EmitLoadTypeFeedbackVector(Register vector); 1401 1402 // Activation support. 
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type);

  // Expects object in r0 and returns map with validated enum cache
  // in r0.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  // Convenience wrapper: branch to |memento_found| when the array in
  // |receiver_reg| has an AllocationMemento, fall through otherwise.
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    b(eq, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Loads the constant pool pointer (pp) register.
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void LoadConstantPoolPointerRegister();

 private:
  // Shared implementation backing the public CallCFunction overloads.
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit.  Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  // Whether to flush the instruction cache when the patcher is destroyed.
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate a MemOperand for loading a context slot.
inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}


// Generate a MemOperand for loading the native context from the current one
// (held in cp).
inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


}  // namespace internal
}  // namespace v8

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_