      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include <limits.h>  // For LONG_MIN, LONG_MAX.
     29 
     30 #include "v8.h"
     31 
     32 #if V8_TARGET_ARCH_ARM
     33 
     34 #include "bootstrapper.h"
     35 #include "codegen.h"
     36 #include "cpu-profiler.h"
     37 #include "debug.h"
     38 #include "runtime.h"
     39 
     40 namespace v8 {
     41 namespace internal {
     42 
     43 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     44     : Assembler(arg_isolate, buffer, size),
     45       generating_stub_(false),
     46       allow_stub_calls_(true),
     47       has_frame_(false) {
     48   if (isolate() != NULL) {
     49     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
     50                                   isolate());
     51   }
     52 }
     53 
     54 
     55 void MacroAssembler::Jump(Register target, Condition cond) {
     56   bx(target, cond);
     57 }
     58 
     59 
     60 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
     61                           Condition cond) {
     62   mov(ip, Operand(target, rmode));
     63   bx(ip, cond);
     64 }
     65 
     66 
     67 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
     68                           Condition cond) {
     69   ASSERT(!RelocInfo::IsCodeTarget(rmode));
     70   Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
     71 }
     72 
     73 
     74 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
     75                           Condition cond) {
     76   ASSERT(RelocInfo::IsCodeTarget(rmode));
     77   // 'code' is always generated ARM code, never THUMB code
     78   AllowDeferredHandleDereference embedding_raw_address;
     79   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
     80 }
     81 
     82 
     83 int MacroAssembler::CallSize(Register target, Condition cond) {
     84   return kInstrSize;
     85 }
     86 
     87 
     88 void MacroAssembler::Call(Register target, Condition cond) {
     89   // Block constant pool for the call instruction sequence.
     90   BlockConstPoolScope block_const_pool(this);
     91   Label start;
     92   bind(&start);
     93   blx(target, cond);
     94   ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
     95 }
     96 
     97 
     98 int MacroAssembler::CallSize(
     99     Address target, RelocInfo::Mode rmode, Condition cond) {
    100   int size = 2 * kInstrSize;
    101   Instr mov_instr = cond | MOV | LeaveCC;
    102   intptr_t immediate = reinterpret_cast<intptr_t>(target);
    103   if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
    104     size += kInstrSize;
    105   }
    106   return size;
    107 }
    108 
    109 
    110 int MacroAssembler::CallSizeNotPredictableCodeSize(
    111     Address target, RelocInfo::Mode rmode, Condition cond) {
    112   int size = 2 * kInstrSize;
    113   Instr mov_instr = cond | MOV | LeaveCC;
    114   intptr_t immediate = reinterpret_cast<intptr_t>(target);
    115   if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
    116     size += kInstrSize;
    117   }
    118   return size;
    119 }
    120 
    121 
    122 void MacroAssembler::Call(Address target,
    123                           RelocInfo::Mode rmode,
    124                           Condition cond,
    125                           TargetAddressStorageMode mode) {
    126   // Block constant pool for the call instruction sequence.
    127   BlockConstPoolScope block_const_pool(this);
    128   Label start;
    129   bind(&start);
    130 
    131   bool old_predictable_code_size = predictable_code_size();
    132   if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    133     set_predictable_code_size(true);
    134   }
    135 
    136   // Call sequence on V7 or later may be:
    137   //  movw  ip, #... @ call address low 16
    138   //  movt  ip, #... @ call address high 16
    139   //  blx   ip
    140   //                      @ return address
    141   // Or for pre-V7 or values that may be back-patched
    142   // to avoid ICache flushes:
    143   //  ldr   ip, [pc, #...] @ call address
    144   //  blx   ip
    145   //                      @ return address
    146 
    147   // Statement positions are expected to be recorded when the target
    148   // address is loaded. The mov method automatically records positions
    149   // when pc is the target; since that is not the case here, we have to
    150   // record them explicitly.
    151   positions_recorder()->WriteRecordedPositions();
    152 
    153   mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
    154   blx(ip, cond);
    155 
    156   ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
    157   if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    158     set_predictable_code_size(old_predictable_code_size);
    159   }
    160 }
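        // Sizing note for the sequence above (descriptive only): CallSize()
        // charges 2 * kInstrSize for the mov of the target plus the blx, and one
        // extra kInstrSize when the Operand cannot be encoded in a single
        // instruction (for example, a movw/movt pair on ARMv7), so the ASSERT_EQ
        // against SizeOfCodeGeneratedSince(&start) holds for both forms shown in
        // the comment above.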
    161 
    162 
    163 int MacroAssembler::CallSize(Handle<Code> code,
    164                              RelocInfo::Mode rmode,
    165                              TypeFeedbackId ast_id,
    166                              Condition cond) {
    167   AllowDeferredHandleDereference using_raw_address;
    168   return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
    169 }
    170 
    171 
    172 void MacroAssembler::Call(Handle<Code> code,
    173                           RelocInfo::Mode rmode,
    174                           TypeFeedbackId ast_id,
    175                           Condition cond,
    176                           TargetAddressStorageMode mode) {
    177   Label start;
    178   bind(&start);
    179   ASSERT(RelocInfo::IsCodeTarget(rmode));
    180   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    181     SetRecordedAstId(ast_id);
    182     rmode = RelocInfo::CODE_TARGET_WITH_ID;
    183   }
    184   // 'code' is always generated ARM code, never THUMB code
    185   AllowDeferredHandleDereference embedding_raw_address;
    186   Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
    187 }
    188 
    189 
    190 void MacroAssembler::Ret(Condition cond) {
    191   bx(lr, cond);
    192 }
    193 
    194 
    195 void MacroAssembler::Drop(int count, Condition cond) {
    196   if (count > 0) {
    197     add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
    198   }
    199 }
    200 
    201 
    202 void MacroAssembler::Ret(int drop, Condition cond) {
    203   Drop(drop, cond);
    204   Ret(cond);
    205 }
    206 
    207 
    208 void MacroAssembler::Swap(Register reg1,
    209                           Register reg2,
    210                           Register scratch,
    211                           Condition cond) {
    212   if (scratch.is(no_reg)) {
    213     eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    214     eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    215     eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    216   } else {
    217     mov(scratch, reg1, LeaveCC, cond);
    218     mov(reg1, reg2, LeaveCC, cond);
    219     mov(reg2, scratch, LeaveCC, cond);
    220   }
    221 }
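        // A minimal sketch of the scratch-free path above, with plain integers
        // (illustrative only, not generated code): three exclusive-ors swap two
        // values in place.
        //   uint32_t a = 0x1234, b = 0xABCD;
        //   a ^= b;  // a == 0x1234 ^ 0xABCD
        //   b ^= a;  // b == 0x1234
        //   a ^= b;  // a == 0xABCD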
    222 
    223 
    224 void MacroAssembler::Call(Label* target) {
    225   bl(target);
    226 }
    227 
    228 
    229 void MacroAssembler::Push(Handle<Object> handle) {
    230   mov(ip, Operand(handle));
    231   push(ip);
    232 }
    233 
    234 
    235 void MacroAssembler::Move(Register dst, Handle<Object> value) {
    236   mov(dst, Operand(value));
    237 }
    238 
    239 
    240 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
    241   if (!dst.is(src)) {
    242     mov(dst, src, LeaveCC, cond);
    243   }
    244 }
    245 
    246 
    247 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
    248   if (!dst.is(src)) {
    249     vmov(dst, src);
    250   }
    251 }
    252 
    253 
    254 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
    255                          Condition cond) {
    256   if (!src2.is_reg() &&
    257       !src2.must_output_reloc_info(this) &&
    258       src2.immediate() == 0) {
    259     mov(dst, Operand::Zero(), LeaveCC, cond);
    260   } else if (!src2.is_single_instruction(this) &&
    261              !src2.must_output_reloc_info(this) &&
    262              CpuFeatures::IsSupported(ARMv7) &&
    263              IsPowerOf2(src2.immediate() + 1)) {
    264     ubfx(dst, src1, 0,
    265         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
    266   } else {
    267     and_(dst, src1, src2, LeaveCC, cond);
    268   }
    269 }
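        // Example of the ubfx fast path above (illustrative): 0xFFFFF cannot be
        // encoded as a single ARM immediate, but 0xFFFFF + 1 is a power of two and
        // WhichPowerOf2(0x100000) == 20, so And(r0, r1, Operand(0xFFFFF)) can be
        // emitted as "ubfx r0, r1, #0, #20" on ARMv7 instead of a two-instruction
        // mask load followed by an and.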
    270 
    271 
    272 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
    273                           Condition cond) {
    274   ASSERT(lsb < 32);
    275   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    276     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    277     and_(dst, src1, Operand(mask), LeaveCC, cond);
    278     if (lsb != 0) {
    279       mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    280     }
    281   } else {
    282     ubfx(dst, src1, lsb, width, cond);
    283   }
    284 }
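        // Worked example of the pre-ARMv7 fallback above (illustrative): for
        // lsb == 4 and width == 8 the mask is
        //   (1 << 12) - 1 - ((1 << 4) - 1) == 0xFFF - 0xF == 0xFF0,
        // so the and_ keeps bits 4..11 and the LSR #4 moves them down to bits
        // 0..7, matching "ubfx dst, src1, #4, #8".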
    285 
    286 
    287 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
    288                           Condition cond) {
    289   ASSERT(lsb < 32);
    290   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    291     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    292     and_(dst, src1, Operand(mask), LeaveCC, cond);
    293     int shift_up = 32 - lsb - width;
    294     int shift_down = lsb + shift_up;
    295     if (shift_up != 0) {
    296       mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    297     }
    298     if (shift_down != 0) {
    299       mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    300     }
    301   } else {
    302     sbfx(dst, src1, lsb, width, cond);
    303   }
    304 }
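        // Illustration of the sign-extending fallback above: with lsb == 4 and
        // width == 8, shift_up == 32 - 4 - 8 == 20 and shift_down == 24, so the
        // masked field is first moved to the top byte with LSL #20 and then
        // shifted back down with ASR #24, replicating the field's sign bit
        // exactly as "sbfx dst, src1, #4, #8" would.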
    305 
    306 
    307 void MacroAssembler::Bfi(Register dst,
    308                          Register src,
    309                          Register scratch,
    310                          int lsb,
    311                          int width,
    312                          Condition cond) {
    313   ASSERT(0 <= lsb && lsb < 32);
    314   ASSERT(0 <= width && width < 32);
    315   ASSERT(lsb + width < 32);
    316   ASSERT(!scratch.is(dst));
    317   if (width == 0) return;
    318   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    319     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    320     bic(dst, dst, Operand(mask));
    321     and_(scratch, src, Operand((1 << width) - 1));
    322     mov(scratch, Operand(scratch, LSL, lsb));
    323     orr(dst, dst, scratch);
    324   } else {
    325     bfi(dst, src, lsb, width, cond);
    326   }
    327 }
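        // Worked example of the fallback above (illustrative): for lsb == 8 and
        // width == 4 the bic clears bits 8..11 of dst, the and_ keeps the low 4
        // bits of src, the LSL #8 moves them into position, and the orr inserts
        // them, which is what "bfi dst, src, #8, #4" does in one instruction.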
    328 
    329 
    330 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
    331                          Condition cond) {
    332   ASSERT(lsb < 32);
    333   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    334     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    335     bic(dst, src, Operand(mask));
    336   } else {
    337     Move(dst, src, cond);
    338     bfc(dst, lsb, width, cond);
    339   }
    340 }
    341 
    342 
    343 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
    344                           Condition cond) {
    345   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    346     ASSERT(!dst.is(pc) && !src.rm().is(pc));
    347     ASSERT((satpos >= 0) && (satpos <= 31));
    348 
    349     // These asserts are required to ensure compatibility with the ARMv7
    350     // implementation.
    351     ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    352     ASSERT(src.rs().is(no_reg));
    353 
    354     Label done;
    355     int satval = (1 << satpos) - 1;
    356 
    357     if (cond != al) {
    358       b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    359     }
    360     if (!(src.is_reg() && dst.is(src.rm()))) {
    361       mov(dst, src);
    362     }
    363     tst(dst, Operand(~satval));
    364     b(eq, &done);
    365     mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
    366     mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    367     bind(&done);
    368   } else {
    369     usat(dst, satpos, src, cond);
    370   }
    371 }
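        // The fallback above clamps dst to the unsigned range [0, (1 << satpos) - 1]:
        // the tst against ~satval checks whether any bit outside that range is set,
        // the "mi" move handles negative inputs (result 0) and the "pl" move handles
        // positive overflow (result satval), mirroring what a single usat does.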
    372 
    373 
    374 void MacroAssembler::LoadRoot(Register destination,
    375                               Heap::RootListIndex index,
    376                               Condition cond) {
    377   if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
    378       isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
    379       !predictable_code_size()) {
    380     // The CPU supports fast immediate values, and this root will never
    381     // change. We will load it as a relocatable immediate value.
    382     Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
    383     mov(destination, Operand(root), LeaveCC, cond);
    384     return;
    385   }
    386   ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
    387 }
    388 
    389 
    390 void MacroAssembler::StoreRoot(Register source,
    391                                Heap::RootListIndex index,
    392                                Condition cond) {
    393   str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
    394 }
    395 
    396 
    397 void MacroAssembler::LoadHeapObject(Register result,
    398                                     Handle<HeapObject> object) {
    399   AllowDeferredHandleDereference using_raw_address;
    400   if (isolate()->heap()->InNewSpace(*object)) {
    401     Handle<Cell> cell = isolate()->factory()->NewCell(object);
    402     mov(result, Operand(cell));
    403     ldr(result, FieldMemOperand(result, Cell::kValueOffset));
    404   } else {
    405     mov(result, Operand(object));
    406   }
    407 }
    408 
    409 
    410 void MacroAssembler::InNewSpace(Register object,
    411                                 Register scratch,
    412                                 Condition cond,
    413                                 Label* branch) {
    414   ASSERT(cond == eq || cond == ne);
    415   and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
    416   cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
    417   b(cond, branch);
    418 }
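        // Note on the check above (descriptive, relying on V8's heap layout): new
        // space occupies a power-of-two sized, aligned reservation, so masking the
        // object's address with new_space_mask leaves only the bits that identify
        // the containing space; those equal new_space_start exactly when the object
        // lies in new space, so eq means "in new space" and ne means "not in new
        // space".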
    419 
    420 
    421 void MacroAssembler::RecordWriteField(
    422     Register object,
    423     int offset,
    424     Register value,
    425     Register dst,
    426     LinkRegisterStatus lr_status,
    427     SaveFPRegsMode save_fp,
    428     RememberedSetAction remembered_set_action,
    429     SmiCheck smi_check) {
    430   // First, check if a write barrier is even needed. The tests below
    431   // catch stores of Smis.
    432   Label done;
    433 
    434   // Skip barrier if writing a smi.
    435   if (smi_check == INLINE_SMI_CHECK) {
    436     JumpIfSmi(value, &done);
    437   }
    438 
    439   // Although the object register is tagged, the offset is relative to the start
    440   //       of the object, so the offset must be a multiple of kPointerSize.
    441   ASSERT(IsAligned(offset, kPointerSize));
    442 
    443   add(dst, object, Operand(offset - kHeapObjectTag));
    444   if (emit_debug_code()) {
    445     Label ok;
    446     tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    447     b(eq, &ok);
    448     stop("Unaligned cell in write barrier");
    449     bind(&ok);
    450   }
    451 
    452   RecordWrite(object,
    453               dst,
    454               value,
    455               lr_status,
    456               save_fp,
    457               remembered_set_action,
    458               OMIT_SMI_CHECK);
    459 
    460   bind(&done);
    461 
    462   // Clobber clobbered input registers when running with the debug-code flag
    463   // turned on to provoke errors.
    464   if (emit_debug_code()) {
    465     mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    466     mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
    467   }
    468 }
    469 
    470 
    471 // Will clobber 4 registers: object, address, value, ip.  The
    472 // register 'object' contains a heap object pointer.  The heap object
    473 // tag is shifted away.
    474 void MacroAssembler::RecordWrite(Register object,
    475                                  Register address,
    476                                  Register value,
    477                                  LinkRegisterStatus lr_status,
    478                                  SaveFPRegsMode fp_mode,
    479                                  RememberedSetAction remembered_set_action,
    480                                  SmiCheck smi_check) {
    481   // The compiled code assumes that record write doesn't change the
    482   // context register, so we check that none of the clobbered
    483   // registers are cp.
    484   ASSERT(!address.is(cp) && !value.is(cp));
    485 
    486   if (emit_debug_code()) {
    487     ldr(ip, MemOperand(address));
    488     cmp(ip, value);
    489     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
    490   }
    491 
    492   Label done;
    493 
    494   if (smi_check == INLINE_SMI_CHECK) {
    495     JumpIfSmi(value, &done);
    496   }
    497 
    498   CheckPageFlag(value,
    499                 value,  // Used as scratch.
    500                 MemoryChunk::kPointersToHereAreInterestingMask,
    501                 eq,
    502                 &done);
    503   CheckPageFlag(object,
    504                 value,  // Used as scratch.
    505                 MemoryChunk::kPointersFromHereAreInterestingMask,
    506                 eq,
    507                 &done);
    508 
    509   // Record the actual write.
    510   if (lr_status == kLRHasNotBeenSaved) {
    511     push(lr);
    512   }
    513   RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
    514   CallStub(&stub);
    515   if (lr_status == kLRHasNotBeenSaved) {
    516     pop(lr);
    517   }
    518 
    519   bind(&done);
    520 
    521   // Clobber clobbered registers when running with the debug-code flag
    522   // turned on to provoke errors.
    523   if (emit_debug_code()) {
    524     mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    525     mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
    526   }
    527 }
    528 
    529 
    530 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
    531                                          Register address,
    532                                          Register scratch,
    533                                          SaveFPRegsMode fp_mode,
    534                                          RememberedSetFinalAction and_then) {
    535   Label done;
    536   if (emit_debug_code()) {
    537     Label ok;
    538     JumpIfNotInNewSpace(object, scratch, &ok);
    539     stop("Remembered set pointer is in new space");
    540     bind(&ok);
    541   }
    542   // Load store buffer top.
    543   ExternalReference store_buffer =
    544       ExternalReference::store_buffer_top(isolate());
    545   mov(ip, Operand(store_buffer));
    546   ldr(scratch, MemOperand(ip));
    547   // Store pointer to buffer and increment buffer top.
    548   str(address, MemOperand(scratch, kPointerSize, PostIndex));
    549   // Write back new top of buffer.
    550   str(scratch, MemOperand(ip));
    551   // Check for end of buffer; if the overflow bit is set (buffer is full),
    552   // fall through and call the StoreBufferOverflowStub below.
    553   tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
    554   if (and_then == kFallThroughAtEnd) {
    555     b(eq, &done);
    556   } else {
    557     ASSERT(and_then == kReturnAtEnd);
    558     Ret(eq);
    559   }
    560   push(lr);
    561   StoreBufferOverflowStub store_buffer_overflow =
    562       StoreBufferOverflowStub(fp_mode);
    563   CallStub(&store_buffer_overflow);
    564   pop(lr);
    565   bind(&done);
    566   if (and_then == kReturnAtEnd) {
    567     Ret();
    568   }
    569 }
    570 
    571 
    572 // Push and pop all registers that can hold pointers.
    573 void MacroAssembler::PushSafepointRegisters() {
    574   // Safepoints expect a block of contiguous register values starting with r0:
    575   ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
    576   // Safepoints expect a block of kNumSafepointRegisters values on the
    577   // stack, so adjust the stack for unsaved registers.
    578   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
    579   ASSERT(num_unsaved >= 0);
    580   sub(sp, sp, Operand(num_unsaved * kPointerSize));
    581   stm(db_w, sp, kSafepointSavedRegisters);
    582 }
    583 
    584 
    585 void MacroAssembler::PopSafepointRegisters() {
    586   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
    587   ldm(ia_w, sp, kSafepointSavedRegisters);
    588   add(sp, sp, Operand(num_unsaved * kPointerSize));
    589 }
    590 
    591 
    592 void MacroAssembler::PushSafepointRegistersAndDoubles() {
    593   // Number of d-regs not known at snapshot time.
    594   ASSERT(!Serializer::enabled());
    595   PushSafepointRegisters();
    596   sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
    597                       kDoubleSize));
    598   for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
    599     vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
    600   }
    601 }
    602 
    603 
    604 void MacroAssembler::PopSafepointRegistersAndDoubles() {
    605   // Number of d-regs not known at snapshot time.
    606   ASSERT(!Serializer::enabled());
    607   for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
    608     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
    609   }
    610   add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
    611                       kDoubleSize));
    612   PopSafepointRegisters();
    613 }
    614 
    615 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
    616                                                              Register dst) {
    617   str(src, SafepointRegistersAndDoublesSlot(dst));
    618 }
    619 
    620 
    621 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
    622   str(src, SafepointRegisterSlot(dst));
    623 }
    624 
    625 
    626 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
    627   ldr(dst, SafepointRegisterSlot(src));
    628 }
    629 
    630 
    631 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
    632   // The registers are pushed starting with the highest encoding,
    633   // which means that lowest encodings are closest to the stack pointer.
    634   ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
    635   return reg_code;
    636 }
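        // For example (descriptive only): the stm(db_w, ...) in
        // PushSafepointRegisters() stores the lowest-numbered register at the
        // lowest address, so r0 ends up at MemOperand(sp, 0) and a register with
        // code i at MemOperand(sp, i * kPointerSize), which is why the slot index
        // is simply the register code.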
    637 
    638 
    639 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
    640   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
    641 }
    642 
    643 
    644 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
    645   // Number of d-regs not known at snapshot time.
    646   ASSERT(!Serializer::enabled());
    647   // General purpose registers are pushed last on the stack.
    648   int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
    649   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
    650   return MemOperand(sp, doubles_size + register_offset);
    651 }
    652 
    653 
    654 void MacroAssembler::Ldrd(Register dst1, Register dst2,
    655                           const MemOperand& src, Condition cond) {
    656   ASSERT(src.rm().is(no_reg));
    657   ASSERT(!dst1.is(lr));  // r14.
    658 
    659   // V8 does not use this addressing mode, so the fallback code
    660   // below doesn't support it yet.
    661   ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
    662 
    663   // Generate two ldr instructions if ldrd is not available.
    664   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
    665       (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    666     CpuFeatureScope scope(this, ARMv7);
    667     ldrd(dst1, dst2, src, cond);
    668   } else {
    669     if ((src.am() == Offset) || (src.am() == NegOffset)) {
    670       MemOperand src2(src);
    671       src2.set_offset(src2.offset() + 4);
    672       if (dst1.is(src.rn())) {
    673         ldr(dst2, src2, cond);
    674         ldr(dst1, src, cond);
    675       } else {
    676         ldr(dst1, src, cond);
    677         ldr(dst2, src2, cond);
    678       }
    679     } else {  // PostIndex or NegPostIndex.
    680       ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
    681       if (dst1.is(src.rn())) {
    682         ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
    683         ldr(dst1, src, cond);
    684       } else {
    685         MemOperand src2(src);
    686         src2.set_offset(src2.offset() - 4);
    687         ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
    688         ldr(dst2, src2, cond);
    689       }
    690     }
    691   }
    692 }
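        // Note on the two-ldr fallback above: when dst1 aliases the base register
        // src.rn(), the second word is loaded first so that the base address is
        // still intact for both loads; loading dst1 first would clobber the base
        // before dst2 could be read.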
    693 
    694 
    695 void MacroAssembler::Strd(Register src1, Register src2,
    696                           const MemOperand& dst, Condition cond) {
    697   ASSERT(dst.rm().is(no_reg));
    698   ASSERT(!src1.is(lr));  // r14.
    699 
    700   // V8 does not use this addressing mode, so the fallback code
    701   // below doesn't support it yet.
    702   ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
    703 
    704   // Generate two str instructions if strd is not available.
    705   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
    706       (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    707     CpuFeatureScope scope(this, ARMv7);
    708     strd(src1, src2, dst, cond);
    709   } else {
    710     MemOperand dst2(dst);
    711     if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
    712       dst2.set_offset(dst2.offset() + 4);
    713       str(src1, dst, cond);
    714       str(src2, dst2, cond);
    715     } else {  // PostIndex or NegPostIndex.
    716       ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
    717       dst2.set_offset(dst2.offset() - 4);
    718       str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
    719       str(src2, dst2, cond);
    720     }
    721   }
    722 }
    723 
    724 
    725 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
    726   // If needed, restore wanted bits of FPSCR.
    727   Label fpscr_done;
    728   vmrs(scratch);
    729   tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
    730   b(ne, &fpscr_done);
    731   orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
    732   vmsr(scratch);
    733   bind(&fpscr_done);
    734 }
    735 
    736 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
    737                                         const Condition cond) {
    738   vsub(value, value, kDoubleRegZero, cond);
    739 }
    740 
    741 
    742 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
    743                                            const DwVfpRegister src2,
    744                                            const Condition cond) {
    745   // Compare and move FPSCR flags to the normal condition flags.
    746   VFPCompareAndLoadFlags(src1, src2, pc, cond);
    747 }
    748 
    749 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
    750                                            const double src2,
    751                                            const Condition cond) {
    752   // Compare and move FPSCR flags to the normal condition flags.
    753   VFPCompareAndLoadFlags(src1, src2, pc, cond);
    754 }
    755 
    756 
    757 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
    758                                             const DwVfpRegister src2,
    759                                             const Register fpscr_flags,
    760                                             const Condition cond) {
    761   // Compare and load FPSCR.
    762   vcmp(src1, src2, cond);
    763   vmrs(fpscr_flags, cond);
    764 }
    765 
    766 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
    767                                             const double src2,
    768                                             const Register fpscr_flags,
    769                                             const Condition cond) {
    770   // Compare and load FPSCR.
    771   vcmp(src1, src2, cond);
    772   vmrs(fpscr_flags, cond);
    773 }
    774 
    775 void MacroAssembler::Vmov(const DwVfpRegister dst,
    776                           const double imm,
    777                           const Register scratch) {
    778   static const DoubleRepresentation minus_zero(-0.0);
    779   static const DoubleRepresentation zero(0.0);
    780   DoubleRepresentation value(imm);
    781   // Handle special values first.
    782   if (value.bits == zero.bits) {
    783     vmov(dst, kDoubleRegZero);
    784   } else if (value.bits == minus_zero.bits) {
    785     vneg(dst, kDoubleRegZero);
    786   } else {
    787     vmov(dst, imm, scratch);
    788   }
    789 }
    790 
    791 
    792 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
    793   if (src.code() < 16) {
    794     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    795     vmov(dst, loc.high());
    796   } else {
    797     vmov(dst, VmovIndexHi, src);
    798   }
    799 }
    800 
    801 
    802 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
    803   if (dst.code() < 16) {
    804     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    805     vmov(loc.high(), src);
    806   } else {
    807     vmov(dst, VmovIndexHi, src);
    808   }
    809 }
    810 
    811 
    812 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
    813   if (src.code() < 16) {
    814     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    815     vmov(dst, loc.low());
    816   } else {
    817     vmov(dst, VmovIndexLo, src);
    818   }
    819 }
    820 
    821 
    822 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
    823   if (dst.code() < 16) {
    824     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    825     vmov(loc.low(), src);
    826   } else {
    827     vmov(dst, VmovIndexLo, src);
    828   }
    829 }
    830 
    831 
    832 void MacroAssembler::ConvertNumberToInt32(Register object,
    833                                           Register dst,
    834                                           Register heap_number_map,
    835                                           Register scratch1,
    836                                           Register scratch2,
    837                                           Register scratch3,
    838                                           DwVfpRegister double_scratch1,
    839                                           LowDwVfpRegister double_scratch2,
    840                                           Label* not_number) {
    841   Label done;
    842   UntagAndJumpIfSmi(dst, object, &done);
    843   JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
    844   vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
    845   ECMAToInt32(dst, double_scratch1,
    846               scratch1, scratch2, scratch3, double_scratch2);
    847 
    848   bind(&done);
    849 }
    850 
    851 
    852 void MacroAssembler::LoadNumber(Register object,
    853                                 LowDwVfpRegister dst,
    854                                 Register heap_number_map,
    855                                 Register scratch,
    856                                 Label* not_number) {
    857   Label is_smi, done;
    858 
    859   UntagAndJumpIfSmi(scratch, object, &is_smi);
    860   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
    861 
    862   vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
    863   b(&done);
    864 
    865   // Handle loading a double from a smi.
    866   bind(&is_smi);
    867   vmov(dst.high(), scratch);
    868   vcvt_f64_s32(dst, dst.high());
    869 
    870   bind(&done);
    871 }
    872 
    873 
    874 void MacroAssembler::LoadNumberAsInt32Double(Register object,
    875                                              DwVfpRegister double_dst,
    876                                              Register heap_number_map,
    877                                              Register scratch,
    878                                              LowDwVfpRegister double_scratch,
    879                                              Label* not_int32) {
    880   ASSERT(!scratch.is(object));
    881   ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
    882 
    883   Label done, obj_is_not_smi;
    884 
    885   UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
    886   vmov(double_scratch.low(), scratch);
    887   vcvt_f64_s32(double_dst, double_scratch.low());
    888   b(&done);
    889 
    890   bind(&obj_is_not_smi);
    891   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
    892 
    893   // Object is a heap number.
    894   // Load the double value.
    895   vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
    896 
    897   TestDoubleIsInt32(double_dst, double_scratch);
    898   // Jump to not_int32 if the operation did not succeed.
    899   b(ne, not_int32);
    900 
    901   bind(&done);
    902 }
    903 
    904 
    905 void MacroAssembler::LoadNumberAsInt32(Register object,
    906                                        Register dst,
    907                                        Register heap_number_map,
    908                                        Register scratch,
    909                                        DwVfpRegister double_scratch0,
    910                                        LowDwVfpRegister double_scratch1,
    911                                        Label* not_int32) {
    912   ASSERT(!dst.is(object));
    913   ASSERT(!scratch.is(object));
    914 
    915   Label done, maybe_undefined;
    916 
    917   UntagAndJumpIfSmi(dst, object, &done);
    918 
    919   JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
    920 
    921   // Object is a heap number.
    922   // Convert the floating point value to a 32-bit integer.
    923   // Load the double value.
    924   vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
    925 
    926   TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
    927   // Jump to not_int32 if the operation did not succeed.
    928   b(ne, not_int32);
    929   b(&done);
    930 
    931   bind(&maybe_undefined);
    932   CompareRoot(object, Heap::kUndefinedValueRootIndex);
    933   b(ne, not_int32);
    934   // |undefined| is truncated to 0.
    935   mov(dst, Operand(Smi::FromInt(0)));
    936   // Fall through.
    937 
    938   bind(&done);
    939 }
    940 
    941 
    942 void MacroAssembler::EnterFrame(StackFrame::Type type) {
    943   // r0-r3: preserved
    944   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
    945   mov(ip, Operand(Smi::FromInt(type)));
    946   push(ip);
    947   mov(ip, Operand(CodeObject()));
    948   push(ip);
    949   add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
    950 }
    951 
    952 
    953 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
    954   // r0: preserved
    955   // r1: preserved
    956   // r2: preserved
    957 
    958   // Drop the execution stack down to the frame pointer and restore
    959   // the caller frame pointer and return address.
    960   mov(sp, fp);
    961   ldm(ia_w, sp, fp.bit() | lr.bit());
    962 }
    963 
    964 
    965 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
    966   // Set up the frame structure on the stack.
    967   ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
    968   ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
    969   ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
    970   Push(lr, fp);
    971   mov(fp, Operand(sp));  // Set up new frame pointer.
    972   // Reserve room for saved entry sp and code object.
    973   sub(sp, sp, Operand(2 * kPointerSize));
    974   if (emit_debug_code()) {
    975     mov(ip, Operand::Zero());
    976     str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
    977   }
    978   mov(ip, Operand(CodeObject()));
    979   str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
    980 
    981   // Save the frame pointer and the context in top.
    982   mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
    983   str(fp, MemOperand(ip));
    984   mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    985   str(cp, MemOperand(ip));
    986 
    987   // Optionally save all double registers.
    988   if (save_doubles) {
    989     SaveFPRegs(sp, ip);
    990     // Note that d0 will be accessible at
    991     //   fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
    992     // since the sp slot and code slot were pushed after the fp.
    993   }
    994 
    995   // Reserve space for the return address and the requested stack space, and
    996   // align the frame in preparation for calling the runtime function.
    997   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
    998   sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
    999   if (frame_alignment > 0) {
   1000     ASSERT(IsPowerOf2(frame_alignment));
   1001     and_(sp, sp, Operand(-frame_alignment));
   1002   }
   1003 
   1004   // Set the exit frame sp value to point just before the return address
   1005   // location.
   1006   add(ip, sp, Operand(kPointerSize));
   1007   str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
   1008 }
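        // Rough layout of the exit frame built above (descriptive only; slot
        // offsets are defined by ExitFrameConstants rather than restated here):
        //   caller sp               == fp + kCallerSPDisplacement
        //   caller pc (saved lr)    at fp + kCallerPCOffset
        //   caller fp (saved fp)    at fp + kCallerFPOffset   <- new fp points here
        //   saved exit sp slot      at fp + kSPOffset
        //   code object slot        at fp + kCodeOffset
        //   optional saved VFP registers (when save_doubles is true)
        //   stack_space words plus a return-address slot, aligned down to
        //   ActivationFrameAlignment()                        <- final sp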
   1009 
   1010 
   1011 void MacroAssembler::InitializeNewString(Register string,
   1012                                          Register length,
   1013                                          Heap::RootListIndex map_index,
   1014                                          Register scratch1,
   1015                                          Register scratch2) {
   1016   SmiTag(scratch1, length);
   1017   LoadRoot(scratch2, map_index);
   1018   str(scratch1, FieldMemOperand(string, String::kLengthOffset));
   1019   mov(scratch1, Operand(String::kEmptyHashField));
   1020   str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
   1021   str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
   1022 }
   1023 
   1024 
   1025 int MacroAssembler::ActivationFrameAlignment() {
   1026 #if V8_HOST_ARCH_ARM
   1027   // Running on the real platform. Use the alignment as mandated by the local
   1028   // environment.
   1029   // Note: This will break if we ever start generating snapshots on one ARM
   1030   // platform for another ARM platform with a different alignment.
   1031   return OS::ActivationFrameAlignment();
   1032 #else  // V8_HOST_ARCH_ARM
   1033   // If we are using the simulator then we should always align to the expected
   1034   // alignment. As the simulator is used to generate snapshots we do not know
   1035   // if the target platform will need alignment, so this is controlled from a
   1036   // flag.
   1037   return FLAG_sim_stack_alignment;
   1038 #endif  // V8_HOST_ARCH_ARM
   1039 }
   1040 
   1041 
   1042 void MacroAssembler::LeaveExitFrame(bool save_doubles,
   1043                                     Register argument_count) {
   1044   // Optionally restore all double registers.
   1045   if (save_doubles) {
   1046     // Calculate the stack location of the saved doubles and restore them.
   1047     const int offset = 2 * kPointerSize;
   1048     sub(r3, fp,
   1049         Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
   1050     RestoreFPRegs(r3, ip);
   1051   }
   1052 
   1053   // Clear top frame.
   1054   mov(r3, Operand::Zero());
   1055   mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1056   str(r3, MemOperand(ip));
   1057 
   1058   // Restore current context from top and clear it in debug mode.
   1059   mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   1060   ldr(cp, MemOperand(ip));
   1061 #ifdef DEBUG
   1062   str(r3, MemOperand(ip));
   1063 #endif
   1064 
   1065   // Tear down the exit frame, pop the arguments, and return.
   1066   mov(sp, Operand(fp));
   1067   ldm(ia_w, sp, fp.bit() | lr.bit());
   1068   if (argument_count.is_valid()) {
   1069     add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
   1070   }
   1071 }
   1072 
   1073 
   1074 void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
   1075   if (use_eabi_hardfloat()) {
   1076     Move(dst, d0);
   1077   } else {
   1078     vmov(dst, r0, r1);
   1079   }
   1080 }
   1081 
   1082 
   1083 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   1084   // This macro takes the dst register to make the code more readable
   1085   // at the call sites. However, the dst register has to be r5 to
   1086   // follow the calling convention which requires the call type to be
   1087   // in r5.
   1088   ASSERT(dst.is(r5));
   1089   if (call_kind == CALL_AS_FUNCTION) {
   1090     mov(dst, Operand(Smi::FromInt(1)));
   1091   } else {
   1092     mov(dst, Operand(Smi::FromInt(0)));
   1093   }
   1094 }
   1095 
   1096 
   1097 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
   1098                                     const ParameterCount& actual,
   1099                                     Handle<Code> code_constant,
   1100                                     Register code_reg,
   1101                                     Label* done,
   1102                                     bool* definitely_mismatches,
   1103                                     InvokeFlag flag,
   1104                                     const CallWrapper& call_wrapper,
   1105                                     CallKind call_kind) {
   1106   bool definitely_matches = false;
   1107   *definitely_mismatches = false;
   1108   Label regular_invoke;
   1109 
    1110   // Check whether the expected and actual argument counts match. If not,
    1111   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
   1112   //  r0: actual arguments count
   1113   //  r1: function (passed through to callee)
   1114   //  r2: expected arguments count
   1115   //  r3: callee code entry
   1116 
   1117   // The code below is made a lot easier because the calling code already sets
   1118   // up actual and expected registers according to the contract if values are
   1119   // passed in registers.
   1120   ASSERT(actual.is_immediate() || actual.reg().is(r0));
   1121   ASSERT(expected.is_immediate() || expected.reg().is(r2));
   1122   ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
   1123 
   1124   if (expected.is_immediate()) {
   1125     ASSERT(actual.is_immediate());
   1126     if (expected.immediate() == actual.immediate()) {
   1127       definitely_matches = true;
   1128     } else {
   1129       mov(r0, Operand(actual.immediate()));
   1130       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   1131       if (expected.immediate() == sentinel) {
   1132         // Don't worry about adapting arguments for builtins that
    1133         // don't want that done. Skip adaptation code by making it look
   1134         // like we have a match between expected and actual number of
   1135         // arguments.
   1136         definitely_matches = true;
   1137       } else {
   1138         *definitely_mismatches = true;
   1139         mov(r2, Operand(expected.immediate()));
   1140       }
   1141     }
   1142   } else {
   1143     if (actual.is_immediate()) {
   1144       cmp(expected.reg(), Operand(actual.immediate()));
   1145       b(eq, &regular_invoke);
   1146       mov(r0, Operand(actual.immediate()));
   1147     } else {
   1148       cmp(expected.reg(), Operand(actual.reg()));
   1149       b(eq, &regular_invoke);
   1150     }
   1151   }
   1152 
   1153   if (!definitely_matches) {
   1154     if (!code_constant.is_null()) {
   1155       mov(r3, Operand(code_constant));
   1156       add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   1157     }
   1158 
   1159     Handle<Code> adaptor =
   1160         isolate()->builtins()->ArgumentsAdaptorTrampoline();
   1161     if (flag == CALL_FUNCTION) {
   1162       call_wrapper.BeforeCall(CallSize(adaptor));
   1163       SetCallKind(r5, call_kind);
   1164       Call(adaptor);
   1165       call_wrapper.AfterCall();
   1166       if (!*definitely_mismatches) {
   1167         b(done);
   1168       }
   1169     } else {
   1170       SetCallKind(r5, call_kind);
   1171       Jump(adaptor, RelocInfo::CODE_TARGET);
   1172     }
   1173     bind(&regular_invoke);
   1174   }
   1175 }
   1176 
   1177 
   1178 void MacroAssembler::InvokeCode(Register code,
   1179                                 const ParameterCount& expected,
   1180                                 const ParameterCount& actual,
   1181                                 InvokeFlag flag,
   1182                                 const CallWrapper& call_wrapper,
   1183                                 CallKind call_kind) {
   1184   // You can't call a function without a valid frame.
   1185   ASSERT(flag == JUMP_FUNCTION || has_frame());
   1186 
   1187   Label done;
   1188   bool definitely_mismatches = false;
   1189   InvokePrologue(expected, actual, Handle<Code>::null(), code,
   1190                  &done, &definitely_mismatches, flag,
   1191                  call_wrapper, call_kind);
   1192   if (!definitely_mismatches) {
   1193     if (flag == CALL_FUNCTION) {
   1194       call_wrapper.BeforeCall(CallSize(code));
   1195       SetCallKind(r5, call_kind);
   1196       Call(code);
   1197       call_wrapper.AfterCall();
   1198     } else {
   1199       ASSERT(flag == JUMP_FUNCTION);
   1200       SetCallKind(r5, call_kind);
   1201       Jump(code);
   1202     }
   1203 
    1204     // Continue here if InvokePrologue handled the invocation itself (via
    1205     // the arguments adaptor) because of mismatched parameter counts.
   1206     bind(&done);
   1207   }
   1208 }
   1209 
   1210 
   1211 void MacroAssembler::InvokeCode(Handle<Code> code,
   1212                                 const ParameterCount& expected,
   1213                                 const ParameterCount& actual,
   1214                                 RelocInfo::Mode rmode,
   1215                                 InvokeFlag flag,
   1216                                 CallKind call_kind) {
   1217   // You can't call a function without a valid frame.
   1218   ASSERT(flag == JUMP_FUNCTION || has_frame());
   1219 
   1220   Label done;
   1221   bool definitely_mismatches = false;
   1222   InvokePrologue(expected, actual, code, no_reg,
   1223                  &done, &definitely_mismatches, flag,
   1224                  NullCallWrapper(), call_kind);
   1225   if (!definitely_mismatches) {
   1226     if (flag == CALL_FUNCTION) {
   1227       SetCallKind(r5, call_kind);
   1228       Call(code, rmode);
   1229     } else {
   1230       SetCallKind(r5, call_kind);
   1231       Jump(code, rmode);
   1232     }
   1233 
    1234     // Continue here if InvokePrologue handled the invocation itself (via
    1235     // the arguments adaptor) because of mismatched parameter counts.
   1236     bind(&done);
   1237   }
   1238 }
   1239 
   1240 
   1241 void MacroAssembler::InvokeFunction(Register fun,
   1242                                     const ParameterCount& actual,
   1243                                     InvokeFlag flag,
   1244                                     const CallWrapper& call_wrapper,
   1245                                     CallKind call_kind) {
   1246   // You can't call a function without a valid frame.
   1247   ASSERT(flag == JUMP_FUNCTION || has_frame());
   1248 
   1249   // Contract with called JS functions requires that function is passed in r1.
   1250   ASSERT(fun.is(r1));
   1251 
   1252   Register expected_reg = r2;
   1253   Register code_reg = r3;
   1254 
   1255   ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   1256   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   1257   ldr(expected_reg,
   1258       FieldMemOperand(code_reg,
   1259                       SharedFunctionInfo::kFormalParameterCountOffset));
   1260   SmiUntag(expected_reg);
   1261   ldr(code_reg,
   1262       FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   1263 
   1264   ParameterCount expected(expected_reg);
   1265   InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
   1266 }
   1267 
   1268 
   1269 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
   1270                                     const ParameterCount& expected,
   1271                                     const ParameterCount& actual,
   1272                                     InvokeFlag flag,
   1273                                     const CallWrapper& call_wrapper,
   1274                                     CallKind call_kind) {
   1275   // You can't call a function without a valid frame.
   1276   ASSERT(flag == JUMP_FUNCTION || has_frame());
   1277 
   1278   // Get the function and setup the context.
   1279   LoadHeapObject(r1, function);
   1280   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   1281 
   1282   // We call indirectly through the code field in the function to
   1283   // allow recompilation to take effect without changing any of the
   1284   // call sites.
   1285   ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   1286   InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
   1287 }
   1288 
   1289 
   1290 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
   1291                                           Register map,
   1292                                           Register scratch,
   1293                                           Label* fail) {
   1294   ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
   1295   IsInstanceJSObjectType(map, scratch, fail);
   1296 }
   1297 
   1298 
   1299 void MacroAssembler::IsInstanceJSObjectType(Register map,
   1300                                             Register scratch,
   1301                                             Label* fail) {
   1302   ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
   1303   cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   1304   b(lt, fail);
   1305   cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
   1306   b(gt, fail);
   1307 }
   1308 
   1309 
   1310 void MacroAssembler::IsObjectJSStringType(Register object,
   1311                                           Register scratch,
   1312                                           Label* fail) {
   1313   ASSERT(kNotStringTag != 0);
   1314 
   1315   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   1316   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1317   tst(scratch, Operand(kIsNotStringMask));
   1318   b(ne, fail);
   1319 }
   1320 
   1321 
   1322 void MacroAssembler::IsObjectNameType(Register object,
   1323                                       Register scratch,
   1324                                       Label* fail) {
   1325   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   1326   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1327   cmp(scratch, Operand(LAST_NAME_TYPE));
   1328   b(hi, fail);
   1329 }
   1330 
   1331 
   1332 #ifdef ENABLE_DEBUGGER_SUPPORT
   1333 void MacroAssembler::DebugBreak() {
   1334   mov(r0, Operand::Zero());
   1335   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   1336   CEntryStub ces(1);
   1337   ASSERT(AllowThisStubCall(&ces));
   1338   Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
   1339 }
   1340 #endif
   1341 
   1342 
   1343 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
   1344                                     int handler_index) {
   1345   // Adjust this code if not the case.
   1346   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   1347   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
   1348   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   1349   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   1350   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   1351   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   1352 
    1353   // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
   1354   // We will build up the handler from the bottom by pushing on the stack.
   1355   // Set up the code object (r5) and the state (r6) for pushing.
   1356   unsigned state =
   1357       StackHandler::IndexField::encode(handler_index) |
   1358       StackHandler::KindField::encode(kind);
   1359   mov(r5, Operand(CodeObject()));
   1360   mov(r6, Operand(state));
   1361 
   1362   // Push the frame pointer, context, state, and code object.
   1363   if (kind == StackHandler::JS_ENTRY) {
   1364     mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
   1365     mov(ip, Operand::Zero());  // NULL frame pointer.
   1366     stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
   1367   } else {
   1368     stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
   1369   }
   1370 
   1371   // Link the current handler as the next handler.
   1372   mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   1373   ldr(r5, MemOperand(r6));
   1374   push(r5);
   1375   // Set this new handler as the current one.
   1376   str(sp, MemOperand(r6));
   1377 }
   1378 
   1379 
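        // Unlinks the topmost stack handler: restores the previous handler as the
        // current one and drops the handler frame from the stack.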
   1380 void MacroAssembler::PopTryHandler() {
   1381   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   1382   pop(r1);
   1383   mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   1384   add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
   1385   str(r1, MemOperand(ip));
   1386 }
   1387 
   1388 
   1389 void MacroAssembler::JumpToHandlerEntry() {
   1390   // Compute the handler entry address and jump to it.  The handler table is
   1391   // a fixed array of (smi-tagged) code offsets.
   1392   // r0 = exception, r1 = code object, r2 = state.
   1393   ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
   1394   add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   1395   mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
   1396   ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
   1397   add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
   1398   add(pc, r1, Operand::SmiUntag(r2));  // Jump
   1399 }
   1400 
   1401 
   1402 void MacroAssembler::Throw(Register value) {
   1403   // Adjust this code if not the case.
   1404   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   1405   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   1406   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   1407   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   1408   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   1409   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   1410 
   1411   // The exception is expected in r0.
   1412   if (!value.is(r0)) {
   1413     mov(r0, value);
   1414   }
   1415   // Drop the stack pointer to the top of the top handler.
   1416   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   1417   ldr(sp, MemOperand(r3));
   1418   // Restore the next handler.
   1419   pop(r2);
   1420   str(r2, MemOperand(r3));
   1421 
   1422   // Get the code object (r1) and state (r2).  Restore the context and frame
   1423   // pointer.
   1424   ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
   1425 
   1426   // If the handler is a JS frame, restore the context to the frame.
   1427   // (kind == JS_ENTRY) == (fp == 0) == (cp == 0), so we could test either
   1428   // fp or cp.
   1429   tst(cp, cp);
   1430   str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
   1431 
   1432   JumpToHandlerEntry();
   1433 }
   1434 
   1435 
   1436 void MacroAssembler::ThrowUncatchable(Register value) {
   1437   // Adjust this code if not the case.
   1438   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   1439   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
   1440   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   1441   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
   1442   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   1443   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
   1444 
   1445   // The exception is expected in r0.
   1446   if (!value.is(r0)) {
   1447     mov(r0, value);
   1448   }
   1449   // Drop the stack pointer to the top of the top stack handler.
   1450   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   1451   ldr(sp, MemOperand(r3));
   1452 
   1453   // Unwind the handlers until the topmost JS_ENTRY handler is found.
   1454   Label fetch_next, check_kind;
   1455   jmp(&check_kind);
   1456   bind(&fetch_next);
   1457   ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
   1458 
   1459   bind(&check_kind);
   1460   STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
   1461   ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
   1462   tst(r2, Operand(StackHandler::KindField::kMask));
   1463   b(ne, &fetch_next);
   1464 
   1465   // Set the top handler address to the next handler past the top
          // JS_ENTRY handler.
   1466   pop(r2);
   1467   str(r2, MemOperand(r3));
   1468   // Get the code object (r1) and state (r2).  Clear the context and frame
   1469   // pointer (0 was saved in the handler).
   1470   ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
   1471 
   1472   JumpToHandlerEntry();
   1473 }
   1474 
   1475 
   1476 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   1477                                             Register scratch,
   1478                                             Label* miss) {
   1479   Label same_contexts;
   1480 
   1481   ASSERT(!holder_reg.is(scratch));
   1482   ASSERT(!holder_reg.is(ip));
   1483   ASSERT(!scratch.is(ip));
   1484 
   1485   // Load current lexical context from the stack frame.
   1486   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
   1487   // In debug mode, make sure the lexical context is set.
   1488 #ifdef DEBUG
   1489   cmp(scratch, Operand::Zero());
   1490   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
   1491 #endif
   1492 
   1493   // Load the native context of the current context.
   1494   int offset =
   1495       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
   1496   ldr(scratch, FieldMemOperand(scratch, offset));
   1497   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
   1498 
   1499   // Check the context is a native context.
   1500   if (emit_debug_code()) {
   1501     // Cannot use ip as a temporary in this verification code, because ip is
   1502     // clobbered as part of cmp with an object Operand.
   1503     push(holder_reg);  // Temporarily save holder on the stack.
   1504     // Read the first word and compare to the native_context_map.
   1505     ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
   1506     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
   1507     cmp(holder_reg, ip);
   1508     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
   1509     pop(holder_reg);  // Restore holder.
   1510   }
   1511 
   1512   // Check if both contexts are the same.
   1513   ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   1514   cmp(scratch, Operand(ip));
   1515   b(eq, &same_contexts);
   1516 
   1517   // Check the context is a native context.
   1518   if (emit_debug_code()) {
   1519     // Cannot use ip as a temporary in this verification code, because ip is
   1520     // clobbered as part of cmp with an object Operand.
   1521     push(holder_reg);  // Temporarily save holder on the stack.
   1522     mov(holder_reg, ip);  // Move ip to its holding place.
   1523     LoadRoot(ip, Heap::kNullValueRootIndex);
   1524     cmp(holder_reg, ip);
   1525     Check(ne, kJSGlobalProxyContextShouldNotBeNull);
   1526 
   1527     ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
   1528     LoadRoot(ip, Heap::kNativeContextMapRootIndex);
   1529     cmp(holder_reg, ip);
   1530     Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
   1531     // Restoring ip is not needed; ip is reloaded below.
   1532     pop(holder_reg);  // Restore holder.
   1533     // Restore ip to holder's context.
   1534     ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
   1535   }
   1536 
   1537   // Check that the security token in the calling global object is
   1538   // compatible with the security token in the receiving global
   1539   // object.
   1540   int token_offset = Context::kHeaderSize +
   1541                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
   1542 
   1543   ldr(scratch, FieldMemOperand(scratch, token_offset));
   1544   ldr(ip, FieldMemOperand(ip, token_offset));
   1545   cmp(scratch, Operand(ip));
   1546   b(ne, miss);
   1547 
   1548   bind(&same_contexts);
   1549 }
   1550 
   1551 
   1552 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
   1553   // First of all we assign the hash seed to scratch.
   1554   LoadRoot(scratch, Heap::kHashSeedRootIndex);
   1555   SmiUntag(scratch);
   1556 
   1557   // Xor original key with a seed.
   1558   eor(t0, t0, Operand(scratch));
   1559 
   1560   // Compute the hash code from the untagged key.  This must be kept in sync
   1561   // with ComputeIntegerHash in utils.h.
   1562   //
   1563   // hash = ~hash + (hash << 15);
   1564   mvn(scratch, Operand(t0));
   1565   add(t0, scratch, Operand(t0, LSL, 15));
   1566   // hash = hash ^ (hash >> 12);
   1567   eor(t0, t0, Operand(t0, LSR, 12));
   1568   // hash = hash + (hash << 2);
   1569   add(t0, t0, Operand(t0, LSL, 2));
   1570   // hash = hash ^ (hash >> 4);
   1571   eor(t0, t0, Operand(t0, LSR, 4));
   1572   // hash = hash * 2057;
   1573   mov(scratch, Operand(t0, LSL, 11));
   1574   add(t0, t0, Operand(t0, LSL, 3));
   1575   add(t0, t0, scratch);
   1576   // hash = hash ^ (hash >> 16);
   1577   eor(t0, t0, Operand(t0, LSR, 16));
   1578 }
   1579 
   1580 
   1581 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   1582                                               Register elements,
   1583                                               Register key,
   1584                                               Register result,
   1585                                               Register t0,
   1586                                               Register t1,
   1587                                               Register t2) {
   1588   // Register use:
   1589   //
   1590   // elements - holds the slow-case elements of the receiver on entry.
   1591   //            Unchanged unless 'result' is the same register.
   1592   //
   1593   // key      - holds the smi key on entry.
   1594   //            Unchanged unless 'result' is the same register.
   1595   //
   1596   // result   - holds the result on exit if the load succeeded.
   1597   //            Allowed to be the same as 'key' or 'elements'.
   1598   //            Unchanged on bailout so 'key' or 'elements' can be used
   1599   //            in further computation.
   1600   //
   1601   // Scratch registers:
   1602   //
   1603   // t0 - holds the untagged key on entry and holds the hash once computed.
   1604   //
   1605   // t1 - used to hold the capacity mask of the dictionary
   1606   //
   1607   // t2 - used for the index into the dictionary.
   1608   Label done;
   1609 
   1610   GetNumberHash(t0, t1);
   1611 
   1612   // Compute the capacity mask.
   1613   ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
   1614   SmiUntag(t1);
   1615   sub(t1, t1, Operand(1));
   1616 
   1617   // Generate an unrolled loop that performs a few probes before giving up.
   1618   static const int kProbes = 4;
   1619   for (int i = 0; i < kProbes; i++) {
   1620     // Use t2 for index calculations and keep the hash intact in t0.
   1621     mov(t2, t0);
   1622     // Compute the masked index: (hash + i + i * i) & mask.
   1623     if (i > 0) {
   1624       add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
   1625     }
   1626     and_(t2, t2, Operand(t1));
   1627 
   1628     // Scale the index by multiplying by the element size.
   1629     ASSERT(SeededNumberDictionary::kEntrySize == 3);
   1630     add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
   1631 
   1632     // Check if the key is identical to the name.
   1633     add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
   1634     ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
   1635     cmp(key, Operand(ip));
   1636     if (i != kProbes - 1) {
   1637       b(eq, &done);
   1638     } else {
   1639       b(ne, miss);
   1640     }
   1641   }
   1642 
   1643   bind(&done);
   1644   // Check that the value is a normal property.
   1645   // t2: elements + (index * kPointerSize)
   1646   const int kDetailsOffset =
   1647       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   1648   ldr(t1, FieldMemOperand(t2, kDetailsOffset));
   1649   tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
   1650   b(ne, miss);
   1651 
   1652   // Get the value at the masked, scaled index and return.
   1653   const int kValueOffset =
   1654       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   1655   ldr(result, FieldMemOperand(t2, kValueOffset));
   1656 }
   1657 
   1658 
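        // Allocates an uninitialized object of statically-known size by bumping the
        // allocation top of the selected space; jumps to 'gc_required' when the
        // space is exhausted (or when inline allocation is disabled). On success
        // 'result' holds the new object's address, tagged if TAG_OBJECT is set.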
   1659 void MacroAssembler::Allocate(int object_size,
   1660                               Register result,
   1661                               Register scratch1,
   1662                               Register scratch2,
   1663                               Label* gc_required,
   1664                               AllocationFlags flags) {
   1665   ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
   1666   if (!FLAG_inline_new) {
   1667     if (emit_debug_code()) {
   1668       // Trash the registers to simulate an allocation failure.
   1669       mov(result, Operand(0x7091));
   1670       mov(scratch1, Operand(0x7191));
   1671       mov(scratch2, Operand(0x7291));
   1672     }
   1673     jmp(gc_required);
   1674     return;
   1675   }
   1676 
   1677   ASSERT(!result.is(scratch1));
   1678   ASSERT(!result.is(scratch2));
   1679   ASSERT(!scratch1.is(scratch2));
   1680   ASSERT(!scratch1.is(ip));
   1681   ASSERT(!scratch2.is(ip));
   1682 
   1683   // Make object size into bytes.
   1684   if ((flags & SIZE_IN_WORDS) != 0) {
   1685     object_size *= kPointerSize;
   1686   }
   1687   ASSERT_EQ(0, object_size & kObjectAlignmentMask);
   1688 
   1689   // Check relative positions of allocation top and limit addresses.
   1690   // The values must be adjacent in memory to allow the use of LDM.
   1691   // Also, assert that the registers are numbered such that the values
   1692   // are loaded in the correct order.
   1693   ExternalReference allocation_top =
   1694       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   1695   ExternalReference allocation_limit =
   1696       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   1697 
   1698   intptr_t top   =
   1699       reinterpret_cast<intptr_t>(allocation_top.address());
   1700   intptr_t limit =
   1701       reinterpret_cast<intptr_t>(allocation_limit.address());
   1702   ASSERT((limit - top) == kPointerSize);
   1703   ASSERT(result.code() < ip.code());
   1704 
   1705   // Set up allocation top address and object size registers.
   1706   Register topaddr = scratch1;
   1707   Register obj_size_reg = scratch2;
   1708   mov(topaddr, Operand(allocation_top));
   1709   Operand obj_size_operand = Operand(object_size);
   1710   if (!obj_size_operand.is_single_instruction(this)) {
   1711     // We are about to steal ip, so we need to load this value first.
   1712     mov(obj_size_reg, obj_size_operand);
   1713   }
   1714 
   1715   // This code stores a temporary value in ip. This is OK, as the code below
   1716   // does not need ip for implicit literal generation.
   1717   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   1718     // Load allocation top into result and allocation limit into ip.
   1719     ldm(ia, topaddr, result.bit() | ip.bit());
   1720   } else {
   1721     if (emit_debug_code()) {
   1722       // Assert that result actually contains top on entry. ip is used
   1723       // immediately below, so this use of ip does not cause a difference in
   1724       // register content between debug and release modes.
   1725       ldr(ip, MemOperand(topaddr));
   1726       cmp(result, ip);
   1727       Check(eq, kUnexpectedAllocationTop);
   1728     }
   1729     // Load allocation limit into ip. Result already contains allocation top.
   1730     ldr(ip, MemOperand(topaddr, limit - top));
   1731   }
   1732 
   1733   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   1734     // Align the next allocation. Storing the filler map without checking top is
   1735     // safe in new-space because the limit of the heap is aligned there.
   1736     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
   1737     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
   1738     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
   1739     Label aligned;
   1740     b(eq, &aligned);
   1741     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
   1742       cmp(result, Operand(ip));
   1743       b(hs, gc_required);
   1744     }
   1745     mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
   1746     str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
   1747     bind(&aligned);
   1748   }
   1749 
   1750   // Calculate new top and bail out if new space is exhausted. Use result
   1751   // to calculate the new top.
   1752   if (obj_size_operand.is_single_instruction(this)) {
   1753     // We can add the size as an immediate
   1754     add(scratch2, result, obj_size_operand, SetCC);
   1755   } else {
   1756     // Doesn't fit in an immediate; we have to use the register.
   1757     add(scratch2, result, obj_size_reg, SetCC);
   1758   }
   1759   b(cs, gc_required);
   1760   cmp(scratch2, Operand(ip));
   1761   b(hi, gc_required);
   1762   str(scratch2, MemOperand(topaddr));
   1763 
   1764   // Tag object if requested.
   1765   if ((flags & TAG_OBJECT) != 0) {
   1766     add(result, result, Operand(kHeapObjectTag));
   1767   }
   1768 }
   1769 
   1770 
   1771 void MacroAssembler::Allocate(Register object_size,
   1772                               Register result,
   1773                               Register scratch1,
   1774                               Register scratch2,
   1775                               Label* gc_required,
   1776                               AllocationFlags flags) {
   1777   if (!FLAG_inline_new) {
   1778     if (emit_debug_code()) {
   1779       // Trash the registers to simulate an allocation failure.
   1780       mov(result, Operand(0x7091));
   1781       mov(scratch1, Operand(0x7191));
   1782       mov(scratch2, Operand(0x7291));
   1783     }
   1784     jmp(gc_required);
   1785     return;
   1786   }
   1787 
   1788   // Assert that the register arguments are different and that none of
   1789   // them are ip. ip is used explicitly in the code generated below.
   1790   ASSERT(!result.is(scratch1));
   1791   ASSERT(!result.is(scratch2));
   1792   ASSERT(!scratch1.is(scratch2));
   1793   ASSERT(!object_size.is(ip));
   1794   ASSERT(!result.is(ip));
   1795   ASSERT(!scratch1.is(ip));
   1796   ASSERT(!scratch2.is(ip));
   1797 
   1798   // Check relative positions of allocation top and limit addresses.
   1799   // The values must be adjacent in memory to allow the use of LDM.
   1800   // Also, assert that the registers are numbered such that the values
   1801   // are loaded in the correct order.
   1802   ExternalReference allocation_top =
   1803       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   1804   ExternalReference allocation_limit =
   1805       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   1806   intptr_t top =
   1807       reinterpret_cast<intptr_t>(allocation_top.address());
   1808   intptr_t limit =
   1809       reinterpret_cast<intptr_t>(allocation_limit.address());
   1810   ASSERT((limit - top) == kPointerSize);
   1811   ASSERT(result.code() < ip.code());
   1812 
   1813   // Set up allocation top address.
   1814   Register topaddr = scratch1;
   1815   mov(topaddr, Operand(allocation_top));
   1816 
   1817   // This code stores a temporary value in ip. This is OK, as the code below
   1818   // does not need ip for implicit literal generation.
   1819   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   1820     // Load allocation top into result and allocation limit into ip.
   1821     ldm(ia, topaddr, result.bit() | ip.bit());
   1822   } else {
   1823     if (emit_debug_code()) {
   1824       // Assert that result actually contains top on entry. ip is used
   1825       // immediately below, so this use of ip does not cause a difference in
   1826       // register content between debug and release modes.
   1827       ldr(ip, MemOperand(topaddr));
   1828       cmp(result, ip);
   1829       Check(eq, kUnexpectedAllocationTop);
   1830     }
   1831     // Load allocation limit into ip. Result already contains allocation top.
   1832     ldr(ip, MemOperand(topaddr, limit - top));
   1833   }
   1834 
   1835   if ((flags & DOUBLE_ALIGNMENT) != 0) {
   1836     // Align the next allocation. Storing the filler map without checking top is
   1837     // safe in new-space because the limit of the heap is aligned there.
   1838     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
   1839     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
   1840     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
   1841     Label aligned;
   1842     b(eq, &aligned);
   1843     if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
   1844       cmp(result, Operand(ip));
   1845       b(hs, gc_required);
   1846     }
   1847     mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
   1848     str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
   1849     bind(&aligned);
   1850   }
   1851 
   1852   // Calculate new top and bail out if new space is exhausted. Use result
   1853   // to calculate the new top. Object size may be in words so a shift is
   1854   // required to get the number of bytes.
   1855   if ((flags & SIZE_IN_WORDS) != 0) {
   1856     add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
   1857   } else {
   1858     add(scratch2, result, Operand(object_size), SetCC);
   1859   }
   1860   b(cs, gc_required);
   1861   cmp(scratch2, Operand(ip));
   1862   b(hi, gc_required);
   1863 
   1864   // Update allocation top. result temporarily holds the new top.
   1865   if (emit_debug_code()) {
   1866     tst(scratch2, Operand(kObjectAlignmentMask));
   1867     Check(eq, kUnalignedAllocationInNewSpace);
   1868   }
   1869   str(scratch2, MemOperand(topaddr));
   1870 
   1871   // Tag object if requested.
   1872   if ((flags & TAG_OBJECT) != 0) {
   1873     add(result, result, Operand(kHeapObjectTag));
   1874   }
   1875 }
   1876 
   1877 
   1878 void MacroAssembler::UndoAllocationInNewSpace(Register object,
   1879                                               Register scratch) {
   1880   ExternalReference new_space_allocation_top =
   1881       ExternalReference::new_space_allocation_top_address(isolate());
   1882 
   1883   // Make sure the object has no tag before resetting top.
   1884   and_(object, object, Operand(~kHeapObjectTagMask));
   1885 #ifdef DEBUG
   1886   // Check that the object being un-allocated is below the current top.
   1887   mov(scratch, Operand(new_space_allocation_top));
   1888   ldr(scratch, MemOperand(scratch));
   1889   cmp(object, scratch);
   1890   Check(lt, kUndoAllocationOfNonAllocatedMemory);
   1891 #endif
   1892   // Write the address of the object to un-allocate as the current top.
   1893   mov(scratch, Operand(new_space_allocation_top));
   1894   str(object, MemOperand(scratch));
   1895 }
   1896 
   1897 
   1898 void MacroAssembler::AllocateTwoByteString(Register result,
   1899                                            Register length,
   1900                                            Register scratch1,
   1901                                            Register scratch2,
   1902                                            Register scratch3,
   1903                                            Label* gc_required) {
   1904   // Calculate the number of bytes needed for the characters in the string while
   1905   // observing object alignment.
   1906   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   1907   mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
   1908   add(scratch1, scratch1,
   1909       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
   1910   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
   1911 
   1912   // Allocate two-byte string in new space.
   1913   Allocate(scratch1,
   1914            result,
   1915            scratch2,
   1916            scratch3,
   1917            gc_required,
   1918            TAG_OBJECT);
   1919 
   1920   // Set the map, length and hash field.
   1921   InitializeNewString(result,
   1922                       length,
   1923                       Heap::kStringMapRootIndex,
   1924                       scratch1,
   1925                       scratch2);
   1926 }
   1927 
   1928 
   1929 void MacroAssembler::AllocateAsciiString(Register result,
   1930                                          Register length,
   1931                                          Register scratch1,
   1932                                          Register scratch2,
   1933                                          Register scratch3,
   1934                                          Label* gc_required) {
   1935   // Calculate the number of bytes needed for the characters in the string while
   1936   // observing object alignment.
   1937   ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   1938   ASSERT(kCharSize == 1);
   1939   add(scratch1, length,
   1940       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
   1941   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
   1942 
   1943   // Allocate ASCII string in new space.
   1944   Allocate(scratch1,
   1945            result,
   1946            scratch2,
   1947            scratch3,
   1948            gc_required,
   1949            TAG_OBJECT);
   1950 
   1951   // Set the map, length and hash field.
   1952   InitializeNewString(result,
   1953                       length,
   1954                       Heap::kAsciiStringMapRootIndex,
   1955                       scratch1,
   1956                       scratch2);
   1957 }
   1958 
   1959 
   1960 void MacroAssembler::AllocateTwoByteConsString(Register result,
   1961                                                Register length,
   1962                                                Register scratch1,
   1963                                                Register scratch2,
   1964                                                Label* gc_required) {
   1965   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   1966            TAG_OBJECT);
   1967 
   1968   InitializeNewString(result,
   1969                       length,
   1970                       Heap::kConsStringMapRootIndex,
   1971                       scratch1,
   1972                       scratch2);
   1973 }
   1974 
   1975 
   1976 void MacroAssembler::AllocateAsciiConsString(Register result,
   1977                                              Register length,
   1978                                              Register scratch1,
   1979                                              Register scratch2,
   1980                                              Label* gc_required) {
   1981   Label allocate_new_space, install_map;
   1982   AllocationFlags flags = TAG_OBJECT;
   1983 
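          // If new-space high promotion mode is active, pretenure the cons string
          // in old pointer space instead of allocating it in new space.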
   1984   ExternalReference high_promotion_mode = ExternalReference::
   1985       new_space_high_promotion_mode_active_address(isolate());
   1986   mov(scratch1, Operand(high_promotion_mode));
   1987   ldr(scratch1, MemOperand(scratch1, 0));
   1988   cmp(scratch1, Operand::Zero());
   1989   b(eq, &allocate_new_space);
   1990 
   1991   Allocate(ConsString::kSize,
   1992            result,
   1993            scratch1,
   1994            scratch2,
   1995            gc_required,
   1996            static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
   1997 
   1998   jmp(&install_map);
   1999 
   2000   bind(&allocate_new_space);
   2001   Allocate(ConsString::kSize,
   2002            result,
   2003            scratch1,
   2004            scratch2,
   2005            gc_required,
   2006            flags);
   2007 
   2008   bind(&install_map);
   2009 
   2010   InitializeNewString(result,
   2011                       length,
   2012                       Heap::kConsAsciiStringMapRootIndex,
   2013                       scratch1,
   2014                       scratch2);
   2015 }
   2016 
   2017 
   2018 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
   2019                                                  Register length,
   2020                                                  Register scratch1,
   2021                                                  Register scratch2,
   2022                                                  Label* gc_required) {
   2023   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   2024            TAG_OBJECT);
   2025 
   2026   InitializeNewString(result,
   2027                       length,
   2028                       Heap::kSlicedStringMapRootIndex,
   2029                       scratch1,
   2030                       scratch2);
   2031 }
   2032 
   2033 
   2034 void MacroAssembler::AllocateAsciiSlicedString(Register result,
   2035                                                Register length,
   2036                                                Register scratch1,
   2037                                                Register scratch2,
   2038                                                Label* gc_required) {
   2039   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
   2040            TAG_OBJECT);
   2041 
   2042   InitializeNewString(result,
   2043                       length,
   2044                       Heap::kSlicedAsciiStringMapRootIndex,
   2045                       scratch1,
   2046                       scratch2);
   2047 }
   2048 
   2049 
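        // Loads the map of 'object' into 'map' and its instance type into
        // 'type_reg', then sets the condition flags from comparing the instance
        // type against 'type' (eq when they match).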
   2050 void MacroAssembler::CompareObjectType(Register object,
   2051                                        Register map,
   2052                                        Register type_reg,
   2053                                        InstanceType type) {
   2054   ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   2055   CompareInstanceType(map, type_reg, type);
   2056 }
   2057 
   2058 
   2059 void MacroAssembler::CompareInstanceType(Register map,
   2060                                          Register type_reg,
   2061                                          InstanceType type) {
   2062   ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
   2063   cmp(type_reg, Operand(type));
   2064 }
   2065 
   2066 
   2067 void MacroAssembler::CompareRoot(Register obj,
   2068                                  Heap::RootListIndex index) {
   2069   ASSERT(!obj.is(ip));
   2070   LoadRoot(ip, index);
   2071   cmp(obj, ip);
   2072 }
   2073 
   2074 
   2075 void MacroAssembler::CheckFastElements(Register map,
   2076                                        Register scratch,
   2077                                        Label* fail) {
   2078   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2079   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2080   STATIC_ASSERT(FAST_ELEMENTS == 2);
   2081   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2082   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   2083   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
   2084   b(hi, fail);
   2085 }
   2086 
   2087 
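        // Jumps to 'fail' unless the map's elements kind is FAST_ELEMENTS or
        // FAST_HOLEY_ELEMENTS, i.e. a fast kind that may hold arbitrary objects.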
   2088 void MacroAssembler::CheckFastObjectElements(Register map,
   2089                                              Register scratch,
   2090                                              Label* fail) {
   2091   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2092   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2093   STATIC_ASSERT(FAST_ELEMENTS == 2);
   2094   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2095   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   2096   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
   2097   b(ls, fail);
   2098   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
   2099   b(hi, fail);
   2100 }
   2101 
   2102 
   2103 void MacroAssembler::CheckFastSmiElements(Register map,
   2104                                           Register scratch,
   2105                                           Label* fail) {
   2106   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2107   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2108   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   2109   cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
   2110   b(hi, fail);
   2111 }
   2112 
   2113 
   2114 void MacroAssembler::StoreNumberToDoubleElements(
   2115                                       Register value_reg,
   2116                                       Register key_reg,
   2117                                       Register elements_reg,
   2118                                       Register scratch1,
   2119                                       LowDwVfpRegister double_scratch,
   2120                                       Label* fail,
   2121                                       int elements_offset) {
   2122   Label smi_value, store;
   2123 
   2124   // Handle smi values specially.
   2125   JumpIfSmi(value_reg, &smi_value);
   2126 
   2127   // Ensure that the object is a heap number
   2128   CheckMap(value_reg,
   2129            scratch1,
   2130            isolate()->factory()->heap_number_map(),
   2131            fail,
   2132            DONT_DO_SMI_CHECK);
   2133 
   2134   vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
   2135   // Force a canonical NaN.
   2136   if (emit_debug_code()) {
   2137     vmrs(ip);
   2138     tst(ip, Operand(kVFPDefaultNaNModeControlBit));
   2139     Assert(ne, kDefaultNaNModeNotSet);
   2140   }
   2141   VFPCanonicalizeNaN(double_scratch);
   2142   b(&store);
   2143 
   2144   bind(&smi_value);
   2145   SmiToDouble(double_scratch, value_reg);
   2146 
   2147   bind(&store);
   2148   add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
   2149   vstr(double_scratch,
   2150        FieldMemOperand(scratch1,
   2151                        FixedDoubleArray::kHeaderSize - elements_offset));
   2152 }
   2153 
   2154 
   2155 void MacroAssembler::CompareMap(Register obj,
   2156                                 Register scratch,
   2157                                 Handle<Map> map,
   2158                                 Label* early_success) {
   2159   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   2160   CompareMap(scratch, map, early_success);
   2161 }
   2162 
   2163 
   2164 void MacroAssembler::CompareMap(Register obj_map,
   2165                                 Handle<Map> map,
   2166                                 Label* early_success) {
   2167   cmp(obj_map, Operand(map));
   2168 }
   2169 
   2170 
   2171 void MacroAssembler::CheckMap(Register obj,
   2172                               Register scratch,
   2173                               Handle<Map> map,
   2174                               Label* fail,
   2175                               SmiCheckType smi_check_type) {
   2176   if (smi_check_type == DO_SMI_CHECK) {
   2177     JumpIfSmi(obj, fail);
   2178   }
   2179 
   2180   Label success;
   2181   CompareMap(obj, scratch, map, &success);
   2182   b(ne, fail);
   2183   bind(&success);
   2184 }
   2185 
   2186 
   2187 void MacroAssembler::CheckMap(Register obj,
   2188                               Register scratch,
   2189                               Heap::RootListIndex index,
   2190                               Label* fail,
   2191                               SmiCheckType smi_check_type) {
   2192   if (smi_check_type == DO_SMI_CHECK) {
   2193     JumpIfSmi(obj, fail);
   2194   }
   2195   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   2196   LoadRoot(ip, index);
   2197   cmp(scratch, ip);
   2198   b(ne, fail);
   2199 }
   2200 
   2201 
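        // Tail-jumps to the 'success' code object when 'obj' has the given map;
        // otherwise execution falls through.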
   2202 void MacroAssembler::DispatchMap(Register obj,
   2203                                  Register scratch,
   2204                                  Handle<Map> map,
   2205                                  Handle<Code> success,
   2206                                  SmiCheckType smi_check_type) {
   2207   Label fail;
   2208   if (smi_check_type == DO_SMI_CHECK) {
   2209     JumpIfSmi(obj, &fail);
   2210   }
   2211   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   2212   mov(ip, Operand(map));
   2213   cmp(scratch, ip);
   2214   Jump(success, RelocInfo::CODE_TARGET, eq);
   2215   bind(&fail);
   2216 }
   2217 
   2218 
   2219 void MacroAssembler::TryGetFunctionPrototype(Register function,
   2220                                              Register result,
   2221                                              Register scratch,
   2222                                              Label* miss,
   2223                                              bool miss_on_bound_function) {
   2224   // Check that the receiver isn't a smi.
   2225   JumpIfSmi(function, miss);
   2226 
   2227   // Check that the function really is a function.  Load map into result reg.
   2228   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
   2229   b(ne, miss);
   2230 
   2231   if (miss_on_bound_function) {
   2232     ldr(scratch,
   2233         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   2234     ldr(scratch,
   2235         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   2236     tst(scratch,
   2237         Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
   2238     b(ne, miss);
   2239   }
   2240 
   2241   // Make sure that the function has an instance prototype.
   2242   Label non_instance;
   2243   ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
   2244   tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
   2245   b(ne, &non_instance);
   2246 
   2247   // Get the prototype or initial map from the function.
   2248   ldr(result,
   2249       FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2250 
   2251   // If the prototype or initial map is the hole, don't return it and
   2252   // simply miss the cache instead. This will allow us to allocate a
   2253   // prototype object on-demand in the runtime system.
   2254   LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2255   cmp(result, ip);
   2256   b(eq, miss);
   2257 
   2258   // If the function does not have an initial map, we're done.
   2259   Label done;
   2260   CompareObjectType(result, scratch, scratch, MAP_TYPE);
   2261   b(ne, &done);
   2262 
   2263   // Get the prototype from the initial map.
   2264   ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2265   jmp(&done);
   2266 
   2267   // Non-instance prototype: Fetch prototype from constructor field
   2268   // in initial map.
   2269   bind(&non_instance);
   2270   ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
   2271 
   2272   // All done.
   2273   bind(&done);
   2274 }
   2275 
   2276 
   2277 void MacroAssembler::CallStub(CodeStub* stub,
   2278                               TypeFeedbackId ast_id,
   2279                               Condition cond) {
   2280   ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   2281   Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
   2282 }
   2283 
   2284 
   2285 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
   2286   ASSERT(allow_stub_calls_ ||
   2287          stub->CompilingCallsToThisStubIsGCSafe(isolate()));
   2288   Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
   2289 }
   2290 
   2291 
   2292 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   2293   return ref0.address() - ref1.address();
   2294 }
   2295 
   2296 
   2297 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
   2298                                               Address function_address,
   2299                                               ExternalReference thunk_ref,
   2300                                               Register thunk_last_arg,
   2301                                               int stack_space,
   2302                                               bool returns_handle,
   2303                                               int return_value_offset) {
   2304   ExternalReference next_address =
   2305       ExternalReference::handle_scope_next_address(isolate());
   2306   const int kNextOffset = 0;
   2307   const int kLimitOffset = AddressOffset(
   2308       ExternalReference::handle_scope_limit_address(isolate()),
   2309       next_address);
   2310   const int kLevelOffset = AddressOffset(
   2311       ExternalReference::handle_scope_level_address(isolate()),
   2312       next_address);
   2313 
   2314   // Allocate HandleScope in callee-save registers.
   2315   mov(r7, Operand(next_address));
   2316   ldr(r4, MemOperand(r7, kNextOffset));
   2317   ldr(r5, MemOperand(r7, kLimitOffset));
   2318   ldr(r6, MemOperand(r7, kLevelOffset));
   2319   add(r6, r6, Operand(1));
   2320   str(r6, MemOperand(r7, kLevelOffset));
   2321 
   2322   if (FLAG_log_timer_events) {
   2323     FrameScope frame(this, StackFrame::MANUAL);
   2324     PushSafepointRegisters();
   2325     PrepareCallCFunction(1, r0);
   2326     mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   2327     CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
   2328     PopSafepointRegisters();
   2329   }
   2330 
   2331   ASSERT(!thunk_last_arg.is(r3));
   2332   Label profiler_disabled;
   2333   Label end_profiler_check;
   2334   bool* is_profiling_flag =
   2335       isolate()->cpu_profiler()->is_profiling_address();
   2336   STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
   2337   mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
   2338   ldrb(r3, MemOperand(r3, 0));
   2339   cmp(r3, Operand(0));
   2340   b(eq, &profiler_disabled);
   2341 
   2342   // Additional parameter is the address of the actual callback.
   2343   mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
   2344   mov(r3, Operand(thunk_ref));
   2345   jmp(&end_profiler_check);
   2346 
   2347   bind(&profiler_disabled);
   2348   mov(r3, Operand(function));
   2349   bind(&end_profiler_check);
   2350 
   2351   // The native call returns to the DirectCEntry stub, which redirects to the
   2352   // return address pushed on the stack (it could have moved after a GC).
   2353   // The DirectCEntry stub itself is generated early and never moves.
   2354   DirectCEntryStub stub;
   2355   stub.GenerateCall(this, r3);
   2356 
   2357   if (FLAG_log_timer_events) {
   2358     FrameScope frame(this, StackFrame::MANUAL);
   2359     PushSafepointRegisters();
   2360     PrepareCallCFunction(1, r0);
   2361     mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   2362     CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
   2363     PopSafepointRegisters();
   2364   }
   2365 
   2366   Label promote_scheduled_exception;
   2367   Label delete_allocated_handles;
   2368   Label leave_exit_frame;
   2369   Label return_value_loaded;
   2370 
   2371   if (returns_handle) {
   2372     Label load_return_value;
   2373     cmp(r0, Operand::Zero());
   2374     b(eq, &load_return_value);
   2375     // Dereference the returned value.
   2376     ldr(r0, MemOperand(r0));
   2377     b(&return_value_loaded);
   2378     bind(&load_return_value);
   2379   }
   2380   // Load the value from the ReturnValue slot.
   2381   ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
   2382   bind(&return_value_loaded);
   2383   // No more valid handles (the result handle was the last one). Restore
   2384   // previous handle scope.
   2385   str(r4, MemOperand(r7, kNextOffset));
   2386   if (emit_debug_code()) {
   2387     ldr(r1, MemOperand(r7, kLevelOffset));
   2388     cmp(r1, r6);
   2389     Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
   2390   }
   2391   sub(r6, r6, Operand(1));
   2392   str(r6, MemOperand(r7, kLevelOffset));
   2393   ldr(ip, MemOperand(r7, kLimitOffset));
   2394   cmp(r5, ip);
   2395   b(ne, &delete_allocated_handles);
   2396 
   2397   // Check if the function scheduled an exception.
   2398   bind(&leave_exit_frame);
   2399   LoadRoot(r4, Heap::kTheHoleValueRootIndex);
   2400   mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
   2401   ldr(r5, MemOperand(ip));
   2402   cmp(r4, r5);
   2403   b(ne, &promote_scheduled_exception);
   2404 
   2405   // LeaveExitFrame expects unwind space to be in a register.
   2406   mov(r4, Operand(stack_space));
   2407   LeaveExitFrame(false, r4);
   2408   mov(pc, lr);
   2409 
   2410   bind(&promote_scheduled_exception);
   2411   TailCallExternalReference(
   2412       ExternalReference(Runtime::kPromoteScheduledException, isolate()),
   2413       0,
   2414       1);
   2415 
   2416   // HandleScope limit has changed. Delete allocated extensions.
   2417   bind(&delete_allocated_handles);
   2418   str(r5, MemOperand(r7, kLimitOffset));
   2419   mov(r4, r0);
   2420   PrepareCallCFunction(1, r5);
   2421   mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   2422   CallCFunction(
   2423       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   2424   mov(r0, r4);
   2425   jmp(&leave_exit_frame);
   2426 }
   2427 
   2428 
   2429 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
   2430   if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
   2431   return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
   2432 }
   2433 
   2434 
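        // Discards the arguments of an invalid runtime call from the stack and
        // leaves undefined in r0 as the result.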
   2435 void MacroAssembler::IllegalOperation(int num_arguments) {
   2436   if (num_arguments > 0) {
   2437     add(sp, sp, Operand(num_arguments * kPointerSize));
   2438   }
   2439   LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   2440 }
   2441 
   2442 
   2443 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   2444   // If the hash field contains an array index, pick it out. The assert checks
   2445   // that the constants for the maximum number of digits of an array index
   2446   // cached in the hash field and the number of bits reserved for it do not
   2447   // conflict.
   2448   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
   2449          (1 << String::kArrayIndexValueBits));
   2450   // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
   2451   // the low kHashShift bits.
   2452   Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
   2453   SmiTag(index, hash);
   2454 }
   2455 
   2456 
   2457 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
   2458   if (CpuFeatures::IsSupported(VFP3)) {
   2459     vmov(value.low(), smi);
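            // Converting with 1 fraction bit treats the smi as a fixed-point value
            // and divides it by 2, undoing the smi tag (kSmiTagSize == 1) as part of
            // the conversion.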
   2460     vcvt_f64_s32(value, 1);
   2461   } else {
   2462     SmiUntag(ip, smi);
   2463     vmov(value.low(), ip);
   2464     vcvt_f64_s32(value, value.low());
   2465   }
   2466 }
   2467 
   2468 
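        // Sets the VFP condition flags by converting 'double_input' to an int32 and
        // back; the comparison yields eq only when the double is exactly
        // representable as a 32-bit integer.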
   2469 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
   2470                                        LowDwVfpRegister double_scratch) {
   2471   ASSERT(!double_input.is(double_scratch));
   2472   vcvt_s32_f64(double_scratch.low(), double_input);
   2473   vcvt_f64_s32(double_scratch, double_scratch.low());
   2474   VFPCompareAndSetFlags(double_input, double_scratch);
   2475 }
   2476 
   2477 
   2478 void MacroAssembler::TryDoubleToInt32Exact(Register result,
   2479                                            DwVfpRegister double_input,
   2480                                            LowDwVfpRegister double_scratch) {
   2481   ASSERT(!double_input.is(double_scratch));
   2482   vcvt_s32_f64(double_scratch.low(), double_input);
   2483   vmov(result, double_scratch.low());
   2484   vcvt_f64_s32(double_scratch, double_scratch.low());
   2485   VFPCompareAndSetFlags(double_input, double_scratch);
   2486 }
   2487 
   2488 
   2489 void MacroAssembler::TryInt32Floor(Register result,
   2490                                    DwVfpRegister double_input,
   2491                                    Register input_high,
   2492                                    LowDwVfpRegister double_scratch,
   2493                                    Label* done,
   2494                                    Label* exact) {
   2495   ASSERT(!result.is(input_high));
   2496   ASSERT(!double_input.is(double_scratch));
   2497   Label negative, exception;
   2498 
   2499   VmovHigh(input_high, double_input);
   2500 
   2501   // Test for NaN and infinities.
   2502   Sbfx(result, input_high,
   2503        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
   2504   cmp(result, Operand(-1));
   2505   b(eq, &exception);
   2506   // Test for values that can be exactly represented as a
   2507   // signed 32-bit integer.
   2508   TryDoubleToInt32Exact(result, double_input, double_scratch);
   2509   // If exact, return (result already fetched).
   2510   b(eq, exact);
   2511   cmp(input_high, Operand::Zero());
   2512   b(mi, &negative);
   2513 
   2514   // Input is in ]+0, +inf[.
   2515   // If result equals 0x7fffffff, the input was out of range or in
   2516   // ]0x7fffffff, 0x80000000[. We ignore this last case, whose floor would
   2517   // still fit into an int32; that means we always treat such input as out of
   2518   // range and go to the exception path.
   2519   // If result < 0x7fffffff, go to done, result fetched.
   2520   cmn(result, Operand(1));
   2521   b(mi, &exception);
   2522   b(done);
   2523 
   2524   // Input is in ]-inf, -0[.
   2525   // If x is a non integer negative number,
   2526   // floor(x) <=> round_to_zero(x) - 1.
   2527   bind(&negative);
   2528   sub(result, result, Operand(1), SetCC);
   2529   // If result is still negative, go to done, result fetched.
   2530   // Else, we had an overflow and we fall through exception.
   2531   b(mi, done);
   2532   bind(&exception);
   2533 }
   2534 
   2535 
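        // Truncates 'double_input' to a 32-bit integer with ECMA-262 ToInt32
        // semantics (modulo 2^32), falling back to manual mantissa shifting when the
        // VFP conversion saturates.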
   2536 void MacroAssembler::ECMAToInt32(Register result,
   2537                                  DwVfpRegister double_input,
   2538                                  Register scratch,
   2539                                  Register scratch_high,
   2540                                  Register scratch_low,
   2541                                  LowDwVfpRegister double_scratch) {
   2542   ASSERT(!scratch_high.is(result));
   2543   ASSERT(!scratch_low.is(result));
   2544   ASSERT(!scratch_low.is(scratch_high));
   2545   ASSERT(!scratch.is(result) &&
   2546          !scratch.is(scratch_high) &&
   2547          !scratch.is(scratch_low));
   2548   ASSERT(!double_input.is(double_scratch));
   2549 
   2550   Label out_of_range, only_low, negate, done;
   2551 
   2552   vcvt_s32_f64(double_scratch.low(), double_input);
   2553   vmov(result, double_scratch.low());
   2554 
   2555   // If result is not saturated (0x7fffffff or 0x80000000), we are done.
   2556   sub(scratch, result, Operand(1));
   2557   cmp(scratch, Operand(0x7ffffffe));
   2558   b(lt, &done);
   2559 
   2560   vmov(scratch_low, scratch_high, double_input);
   2561   Ubfx(scratch, scratch_high,
   2562        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
   2563   // Load scratch with exponent - 1. This is faster than loading
   2564   // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
   2565   sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
   2566   // If exponent is greater than or equal to 84, the 32 least significant
   2567   // bits of the value are 0s (at exponent 84 the value is the implicit 1 bit,
   2568   // 52 mantissa bits, then 32 zero bits), so the result is 0.
   2569   // Compare exponent with 84 (compare exponent - 1 with 83).
   2570   cmp(scratch, Operand(83));
   2571   b(ge, &out_of_range);
   2572 
   2573   // If we reach this code, 31 <= exponent <= 83.
   2574   // So, we don't have to handle cases where 0 <= exponent <= 20 for
   2575   // which we would need to shift right the high part of the mantissa.
   2576   // Scratch contains exponent - 1.
   2577   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
   2578   rsb(scratch, scratch, Operand(51), SetCC);
   2579   b(ls, &only_low);
   2580   // 21 <= exponent <= 51, shift scratch_low and scratch_high
   2581   // to generate the result.
   2582   mov(scratch_low, Operand(scratch_low, LSR, scratch));
   2583   // Scratch contains: 52 - exponent.
   2584   // We need: exponent - 20.
   2585   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
   2586   rsb(scratch, scratch, Operand(32));
   2587   Ubfx(result, scratch_high,
   2588        0, HeapNumber::kMantissaBitsInTopWord);
   2589   // Set the implicit 1 before the mantissa part in scratch_high.
   2590   orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
   2591   orr(result, scratch_low, Operand(result, LSL, scratch));
   2592   b(&negate);
   2593 
   2594   bind(&out_of_range);
   2595   mov(result, Operand::Zero());
   2596   b(&done);
   2597 
   2598   bind(&only_low);
   2599   // 52 <= exponent <= 83, shift only scratch_low.
   2600   // On entry, scratch contains: 52 - exponent.
   2601   rsb(scratch, scratch, Operand::Zero());
   2602   mov(result, Operand(scratch_low, LSL, scratch));
   2603 
   2604   bind(&negate);
   2605   // If the input was positive, scratch_high ASR 31 equals 0 and
   2606   // scratch_high LSR 31 equals 0.
   2607   // New result = (result eor 0) + 0 = result.
   2608   // If the input was negative, we have to negate the result.
   2609   // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
   2610   // New result = (result eor 0xffffffff) + 1 = 0 - result.
   2611   eor(result, result, Operand(scratch_high, ASR, 31));
   2612   add(result, result, Operand(scratch_high, LSR, 31));
   2613 
   2614   bind(&done);
   2615 }
   2616 
   2617 
   2618 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
   2619                                          Register src,
   2620                                          int num_least_bits) {
   2621   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
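            // ubfx extracts 'num_least_bits' starting just above the smi tag,
            // untagging and masking in a single instruction.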
   2622     ubfx(dst, src, kSmiTagSize, num_least_bits);
   2623   } else {
   2624     SmiUntag(dst, src);
   2625     and_(dst, dst, Operand((1 << num_least_bits) - 1));
   2626   }
   2627 }
   2628 
   2629 
   2630 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
   2631                                            Register src,
   2632                                            int num_least_bits) {
   2633   and_(dst, src, Operand((1 << num_least_bits) - 1));
   2634 }
   2635 
   2636 
   2637 void MacroAssembler::CallRuntime(const Runtime::Function* f,
   2638                                  int num_arguments) {
   2639   // All parameters are on the stack.  r0 has the return value after call.
   2640 
   2641   // If the expected number of arguments of the runtime function is
   2642   // constant, we check that the actual number of arguments match the
   2643   // expectation.
   2644   if (f->nargs >= 0 && f->nargs != num_arguments) {
   2645     IllegalOperation(num_arguments);
   2646     return;
   2647   }
   2648 
   2649   // TODO(1236192): Most runtime routines don't need the number of
   2650   // arguments passed in because it is constant. At some point we
   2651   // should remove this need and make the runtime routine entry code
   2652   // smarter.
   2653   mov(r0, Operand(num_arguments));
   2654   mov(r1, Operand(ExternalReference(f, isolate())));
   2655   CEntryStub stub(1);
   2656   CallStub(&stub);
   2657 }
   2658 
   2659 
   2660 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
   2661   CallRuntime(Runtime::FunctionForId(fid), num_arguments);
   2662 }
   2663 
   2664 
   2665 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   2666   const Runtime::Function* function = Runtime::FunctionForId(id);
   2667   mov(r0, Operand(function->nargs));
   2668   mov(r1, Operand(ExternalReference(function, isolate())));
   2669   CEntryStub stub(1, kSaveFPRegs);
   2670   CallStub(&stub);
   2671 }
   2672 
   2673 
   2674 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
   2675                                            int num_arguments) {
   2676   mov(r0, Operand(num_arguments));
   2677   mov(r1, Operand(ext));
   2678 
   2679   CEntryStub stub(1);
   2680   CallStub(&stub);
   2681 }
   2682 
   2683 
   2684 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
   2685                                                int num_arguments,
   2686                                                int result_size) {
   2687   // TODO(1236192): Most runtime routines don't need the number of
   2688   // arguments passed in because it is constant. At some point we
   2689   // should remove this need and make the runtime routine entry code
   2690   // smarter.
   2691   mov(r0, Operand(num_arguments));
   2692   JumpToExternalReference(ext);
   2693 }
   2694 
   2695 
   2696 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
   2697                                      int num_arguments,
   2698                                      int result_size) {
   2699   TailCallExternalReference(ExternalReference(fid, isolate()),
   2700                             num_arguments,
   2701                             result_size);
   2702 }
   2703 
   2704 
   2705 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
   2706 #if defined(__thumb__)
   2707   // Thumb mode builtin.
   2708   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
   2709 #endif
   2710   mov(r1, Operand(builtin));
   2711   CEntryStub stub(1);
   2712   Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
   2713 }
   2714 
   2715 
   2716 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
   2717                                    InvokeFlag flag,
   2718                                    const CallWrapper& call_wrapper) {
   2719   // You can't call a builtin without a valid frame.
   2720   ASSERT(flag == JUMP_FUNCTION || has_frame());
   2721 
   2722   GetBuiltinEntry(r2, id);
   2723   if (flag == CALL_FUNCTION) {
   2724     call_wrapper.BeforeCall(CallSize(r2));
   2725     SetCallKind(r5, CALL_AS_METHOD);
   2726     Call(r2);
   2727     call_wrapper.AfterCall();
   2728   } else {
   2729     ASSERT(flag == JUMP_FUNCTION);
   2730     SetCallKind(r5, CALL_AS_METHOD);
   2731     Jump(r2);
   2732   }
   2733 }
   2734 
   2735 
   2736 void MacroAssembler::GetBuiltinFunction(Register target,
   2737                                         Builtins::JavaScript id) {
   2738   // Load the builtins object into target register.
   2739   ldr(target,
   2740       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   2741   ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
   2742   // Load the JavaScript builtin function from the builtins object.
   2743   ldr(target, FieldMemOperand(target,
   2744                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
   2745 }
   2746 
   2747 
   2748 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   2749   ASSERT(!target.is(r1));
   2750   GetBuiltinFunction(r1, id);
   2751   // Load the code entry point from the builtins object.
   2752   ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   2753 }
   2754 
   2755 
   2756 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
   2757                                 Register scratch1, Register scratch2) {
   2758   if (FLAG_native_code_counters && counter->Enabled()) {
   2759     mov(scratch1, Operand(value));
   2760     mov(scratch2, Operand(ExternalReference(counter)));
   2761     str(scratch1, MemOperand(scratch2));
   2762   }
   2763 }
   2764 
   2765 
   2766 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
   2767                                       Register scratch1, Register scratch2) {
   2768   ASSERT(value > 0);
   2769   if (FLAG_native_code_counters && counter->Enabled()) {
   2770     mov(scratch2, Operand(ExternalReference(counter)));
   2771     ldr(scratch1, MemOperand(scratch2));
   2772     add(scratch1, scratch1, Operand(value));
   2773     str(scratch1, MemOperand(scratch2));
   2774   }
   2775 }
   2776 
   2777 
   2778 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
   2779                                       Register scratch1, Register scratch2) {
   2780   ASSERT(value > 0);
   2781   if (FLAG_native_code_counters && counter->Enabled()) {
   2782     mov(scratch2, Operand(ExternalReference(counter)));
   2783     ldr(scratch1, MemOperand(scratch2));
   2784     sub(scratch1, scratch1, Operand(value));
   2785     str(scratch1, MemOperand(scratch2));
   2786   }
   2787 }
   2788 
   2789 
   2790 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
   2791   if (emit_debug_code())
   2792     Check(cond, reason);
   2793 }
   2794 
   2795 
   2796 void MacroAssembler::AssertFastElements(Register elements) {
   2797   if (emit_debug_code()) {
   2798     ASSERT(!elements.is(ip));
   2799     Label ok;
   2800     push(elements);
   2801     ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
   2802     LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   2803     cmp(elements, ip);
   2804     b(eq, &ok);
   2805     LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
   2806     cmp(elements, ip);
   2807     b(eq, &ok);
   2808     LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
   2809     cmp(elements, ip);
   2810     b(eq, &ok);
   2811     Abort(kJSObjectWithFastElementsMapHasSlowElements);
   2812     bind(&ok);
   2813     pop(elements);
   2814   }
   2815 }
   2816 
   2817 
   2818 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
   2819   Label L;
   2820   b(cond, &L);
   2821   Abort(reason);
   2822   // will not return here
   2823   bind(&L);
   2824 }
   2825 
   2826 
   2827 void MacroAssembler::Abort(BailoutReason reason) {
   2828   Label abort_start;
   2829   bind(&abort_start);
   2830   // We want to pass the msg string as a smi to avoid GC
   2831   // problems; however, msg is not guaranteed to be properly
   2832   // aligned. Instead, we pass an aligned pointer that is
   2833   // a proper v8 smi, but also pass the alignment difference
   2834   // from the real pointer as a smi.
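          // Sketch of the encoding (illustrative; on this 32-bit target
          // kSmiTag == 0 and kSmiTagMask == 1): p0 is msg with its low bit
          // cleared, so it already looks like a smi, and Smi::FromInt(p1 - p0)
          // carries the dropped 0/1 alignment difference.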
   2835   const char* msg = GetBailoutReason(reason);
   2836   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   2837   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
   2838   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
   2839 #ifdef DEBUG
   2840   if (msg != NULL) {
   2841     RecordComment("Abort message: ");
   2842     RecordComment(msg);
   2843   }
   2844 #endif
   2845 
   2846   mov(r0, Operand(p0));
   2847   push(r0);
   2848   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   2849   push(r0);
   2850   // Disable stub call restrictions to always allow calls to abort.
   2851   if (!has_frame_) {
   2852     // We don't actually want to generate a pile of code for this, so just
   2853     // claim there is a stack frame, without generating one.
   2854     FrameScope scope(this, StackFrame::NONE);
   2855     CallRuntime(Runtime::kAbort, 2);
   2856   } else {
   2857     CallRuntime(Runtime::kAbort, 2);
   2858   }
   2859   // will not return here
   2860   if (is_const_pool_blocked()) {
   2861     // If the calling code cares about the exact number of
   2862     // instructions generated, we insert padding here to keep the size
   2863     // of the Abort macro constant.
   2864     static const int kExpectedAbortInstructions = 10;
   2865     int abort_instructions = InstructionsGeneratedSince(&abort_start);
   2866     ASSERT(abort_instructions <= kExpectedAbortInstructions);
   2867     while (abort_instructions++ < kExpectedAbortInstructions) {
   2868       nop();
   2869     }
   2870   }
   2871 }
   2872 
   2873 
   2874 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   2875   if (context_chain_length > 0) {
   2876     // Move up the chain of contexts to the context containing the slot.
   2877     ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   2878     for (int i = 1; i < context_chain_length; i++) {
   2879       ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   2880     }
   2881   } else {
   2882     // Slot is in the current function context.  Move it into the
   2883     // destination register in case we store into it (the write barrier
   2884     // cannot be allowed to destroy the context in cp).
   2885     mov(dst, cp);
   2886   }
   2887 }
   2888 
   2889 
   2890 void MacroAssembler::LoadTransitionedArrayMapConditional(
   2891     ElementsKind expected_kind,
   2892     ElementsKind transitioned_kind,
   2893     Register map_in_out,
   2894     Register scratch,
   2895     Label* no_map_match) {
   2896   // Load the global or builtins object from the current context.
   2897   ldr(scratch,
   2898       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   2899   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
   2900 
   2901   // Check that the function's map is the same as the expected cached map.
   2902   ldr(scratch,
   2903       MemOperand(scratch,
   2904                  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
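          // The slot holds a FixedArray of cached JSArray maps indexed by
          // ElementsKind, so the entry for a kind lives at
          // FixedArrayBase::kHeaderSize + kind * kPointerSize.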
   2905   size_t offset = expected_kind * kPointerSize +
   2906       FixedArrayBase::kHeaderSize;
   2907   ldr(ip, FieldMemOperand(scratch, offset));
   2908   cmp(map_in_out, ip);
   2909   b(ne, no_map_match);
   2910 
   2911   // Use the transitioned cached map.
   2912   offset = transitioned_kind * kPointerSize +
   2913       FixedArrayBase::kHeaderSize;
   2914   ldr(map_in_out, FieldMemOperand(scratch, offset));
   2915 }
   2916 
   2917 
   2918 void MacroAssembler::LoadInitialArrayMap(
   2919     Register function_in, Register scratch,
   2920     Register map_out, bool can_have_holes) {
   2921   ASSERT(!function_in.is(map_out));
   2922   Label done;
   2923   ldr(map_out, FieldMemOperand(function_in,
   2924                                JSFunction::kPrototypeOrInitialMapOffset));
   2925   if (!FLAG_smi_only_arrays) {
   2926     ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
   2927     LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
   2928                                         kind,
   2929                                         map_out,
   2930                                         scratch,
   2931                                         &done);
   2932   } else if (can_have_holes) {
   2933     LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
   2934                                         FAST_HOLEY_SMI_ELEMENTS,
   2935                                         map_out,
   2936                                         scratch,
   2937                                         &done);
   2938   }
   2939   bind(&done);
   2940 }
   2941 
   2942 
   2943 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   2944   // Load the global or builtins object from the current context.
   2945   ldr(function,
   2946       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   2947   // Load the native context from the global or builtins object.
   2948   ldr(function, FieldMemOperand(function,
   2949                                 GlobalObject::kNativeContextOffset));
   2950   // Load the function from the native context.
   2951   ldr(function, MemOperand(function, Context::SlotOffset(index)));
   2952 }
   2953 
   2954 
   2955 void MacroAssembler::LoadArrayFunction(Register function) {
   2956   // Load the global or builtins object from the current context.
   2957   ldr(function,
   2958       MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   2959   // Load the global context from the global or builtins object.
   2960   ldr(function,
   2961       FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
   2962   // Load the array function from the global context.
   2963   ldr(function,
   2964       MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
   2965 }
   2966 
   2967 
   2968 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   2969                                                   Register map,
   2970                                                   Register scratch) {
   2971   // Load the initial map. The global functions all have initial maps.
   2972   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2973   if (emit_debug_code()) {
   2974     Label ok, fail;
   2975     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
   2976     b(&ok);
   2977     bind(&fail);
   2978     Abort(kGlobalFunctionsMustHaveInitialMap);
   2979     bind(&ok);
   2980   }
   2981 }
   2982 
   2983 
   2984 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
   2985     Register reg,
   2986     Register scratch,
   2987     Label* not_power_of_two_or_zero) {
   2988   sub(scratch, reg, Operand(1), SetCC);
   2989   b(mi, not_power_of_two_or_zero);
   2990   tst(scratch, reg);
   2991   b(ne, not_power_of_two_or_zero);
   2992 }
   2993 
   2994 
   2995 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
   2996     Register reg,
   2997     Register scratch,
   2998     Label* zero_and_neg,
   2999     Label* not_power_of_two) {
   3000   sub(scratch, reg, Operand(1), SetCC);
   3001   b(mi, zero_and_neg);
   3002   tst(scratch, reg);
   3003   b(ne, not_power_of_two);
   3004 }
   3005 
   3006 
   3007 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
   3008                                       Register reg2,
   3009                                       Label* on_not_both_smi) {
   3010   STATIC_ASSERT(kSmiTag == 0);
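          // The second tst below is predicated on eq, so it only executes when
          // reg1 is a smi; the final ne branch therefore fires when either
          // register has its tag bit set.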
   3011   tst(reg1, Operand(kSmiTagMask));
   3012   tst(reg2, Operand(kSmiTagMask), eq);
   3013   b(ne, on_not_both_smi);
   3014 }
   3015 
   3016 
   3017 void MacroAssembler::UntagAndJumpIfSmi(
   3018     Register dst, Register src, Label* smi_case) {
   3019   STATIC_ASSERT(kSmiTag == 0);
   3020   SmiUntag(dst, src, SetCC);
   3021   b(cc, smi_case);  // Shifter carry is not set for a smi.
   3022 }
   3023 
   3024 
   3025 void MacroAssembler::UntagAndJumpIfNotSmi(
   3026     Register dst, Register src, Label* non_smi_case) {
   3027   STATIC_ASSERT(kSmiTag == 0);
   3028   SmiUntag(dst, src, SetCC);
   3029   b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
   3030 }
   3031 
   3032 
   3033 void MacroAssembler::JumpIfEitherSmi(Register reg1,
   3034                                      Register reg2,
   3035                                      Label* on_either_smi) {
   3036   STATIC_ASSERT(kSmiTag == 0);
   3037   tst(reg1, Operand(kSmiTagMask));
   3038   tst(reg2, Operand(kSmiTagMask), ne);
   3039   b(eq, on_either_smi);
   3040 }
   3041 
   3042 
   3043 void MacroAssembler::AssertNotSmi(Register object) {
   3044   if (emit_debug_code()) {
   3045     STATIC_ASSERT(kSmiTag == 0);
   3046     tst(object, Operand(kSmiTagMask));
   3047     Check(ne, kOperandIsASmi);
   3048   }
   3049 }
   3050 
   3051 
   3052 void MacroAssembler::AssertSmi(Register object) {
   3053   if (emit_debug_code()) {
   3054     STATIC_ASSERT(kSmiTag == 0);
   3055     tst(object, Operand(kSmiTagMask));
   3056     Check(eq, kOperandIsNotSmi);
   3057   }
   3058 }
   3059 
   3060 
   3061 void MacroAssembler::AssertString(Register object) {
   3062   if (emit_debug_code()) {
   3063     STATIC_ASSERT(kSmiTag == 0);
   3064     tst(object, Operand(kSmiTagMask));
   3065     Check(ne, kOperandIsASmiAndNotAString);
   3066     push(object);
   3067     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
   3068     CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
   3069     pop(object);
   3070     Check(lo, kOperandIsNotAString);
   3071   }
   3072 }
   3073 
   3074 
   3075 void MacroAssembler::AssertName(Register object) {
   3076   if (emit_debug_code()) {
   3077     STATIC_ASSERT(kSmiTag == 0);
   3078     tst(object, Operand(kSmiTagMask));
   3079     Check(ne, kOperandIsASmiAndNotAName);
   3080     push(object);
   3081     ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
   3082     CompareInstanceType(object, object, LAST_NAME_TYPE);
   3083     pop(object);
   3084     Check(le, kOperandIsNotAName);
   3085   }
   3086 }
   3087 
   3088 
   3089 
   3090 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
   3091   if (emit_debug_code()) {
   3092     CompareRoot(reg, index);
   3093     Check(eq, kHeapNumberMapRegisterClobbered);
   3094   }
   3095 }
   3096 
   3097 
   3098 void MacroAssembler::JumpIfNotHeapNumber(Register object,
   3099                                          Register heap_number_map,
   3100                                          Register scratch,
   3101                                          Label* on_not_heap_number) {
   3102   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   3103   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   3104   cmp(scratch, heap_number_map);
   3105   b(ne, on_not_heap_number);
   3106 }
   3107 
   3108 
   3109 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
   3110     Register first,
   3111     Register second,
   3112     Register scratch1,
   3113     Register scratch2,
   3114     Label* failure) {
   3115   // Test that both first and second are sequential ASCII strings.
   3116   // Assume that they are non-smis.
   3117   ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   3118   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   3119   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   3120   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
   3121 
   3122   JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
   3123                                                scratch2,
   3124                                                scratch1,
   3125                                                scratch2,
   3126                                                failure);
   3127 }
   3128 
   3129 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
   3130                                                          Register second,
   3131                                                          Register scratch1,
   3132                                                          Register scratch2,
   3133                                                          Label* failure) {
   3134   // Check that neither is a smi.
   3135   and_(scratch1, first, Operand(second));
   3136   JumpIfSmi(scratch1, failure);
   3137   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
   3138                                              second,
   3139                                              scratch1,
   3140                                              scratch2,
   3141                                              failure);
   3142 }
   3143 
   3144 
   3145 void MacroAssembler::JumpIfNotUniqueName(Register reg,
   3146                                          Label* not_unique_name) {
   3147   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   3148   Label succeed;
   3149   tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   3150   b(eq, &succeed);
   3151   cmp(reg, Operand(SYMBOL_TYPE));
   3152   b(ne, not_unique_name);
   3153 
   3154   bind(&succeed);
   3155 }
   3156 
   3157 
   3158 // Allocates a heap number or jumps to the gc_required label if the young space
   3159 // is full and a scavenge is needed.
   3160 void MacroAssembler::AllocateHeapNumber(Register result,
   3161                                         Register scratch1,
   3162                                         Register scratch2,
   3163                                         Register heap_number_map,
   3164                                         Label* gc_required,
   3165                                         TaggingMode tagging_mode) {
   3166   // Allocate an object in the heap for the heap number and tag it as a heap
   3167   // object.
   3168   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
   3169            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
   3170 
   3171   // Store heap number map in the allocated object.
   3172   AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   3173   if (tagging_mode == TAG_RESULT) {
   3174     str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
   3175   } else {
   3176     str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
   3177   }
   3178 }
   3179 
   3180 
   3181 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
   3182                                                  DwVfpRegister value,
   3183                                                  Register scratch1,
   3184                                                  Register scratch2,
   3185                                                  Register heap_number_map,
   3186                                                  Label* gc_required) {
   3187   AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
   3188   sub(scratch1, result, Operand(kHeapObjectTag));
   3189   vstr(value, scratch1, HeapNumber::kValueOffset);
   3190 }
   3191 
   3192 
   3193 // Copies a fixed number of fields of heap objects from src to dst.
   3194 void MacroAssembler::CopyFields(Register dst,
   3195                                 Register src,
   3196                                 LowDwVfpRegister double_scratch,
   3197                                 int field_count) {
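          // Pairs of pointer-sized fields are moved with 8-byte VFP transfers;
          // a trailing odd field, if any, is moved with a single 4-byte one.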
   3198   int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
   3199   for (int i = 0; i < double_count; i++) {
   3200     vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
   3201     vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
   3202   }
   3203 
   3204   STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
   3205   STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
   3206 
   3207   int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
   3208   if (remain != 0) {
   3209     vldr(double_scratch.low(),
   3210          FieldMemOperand(src, (field_count - 1) * kPointerSize));
   3211     vstr(double_scratch.low(),
   3212          FieldMemOperand(dst, (field_count - 1) * kPointerSize));
   3213   }
   3214 }
   3215 
   3216 
   3217 void MacroAssembler::CopyBytes(Register src,
   3218                                Register dst,
   3219                                Register length,
   3220                                Register scratch) {
   3221   Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
   3222 
   3223   // Align src before copying in word size chunks.
   3224   bind(&align_loop);
   3225   cmp(length, Operand::Zero());
   3226   b(eq, &done);
   3227   bind(&align_loop_1);
   3228   tst(src, Operand(kPointerSize - 1));
   3229   b(eq, &word_loop);
   3230   ldrb(scratch, MemOperand(src, 1, PostIndex));
   3231   strb(scratch, MemOperand(dst, 1, PostIndex));
   3232   sub(length, length, Operand(1), SetCC);
   3233   b(ne, &byte_loop_1);
   3234 
   3235   // Copy bytes in word size chunks.
   3236   bind(&word_loop);
   3237   if (emit_debug_code()) {
   3238     tst(src, Operand(kPointerSize - 1));
   3239     Assert(eq, kExpectingAlignmentForCopyBytes);
   3240   }
   3241   cmp(length, Operand(kPointerSize));
   3242   b(lt, &byte_loop);
   3243   ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
   3244   if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
   3245     str(scratch, MemOperand(dst, kPointerSize, PostIndex));
   3246   } else {
   3247     strb(scratch, MemOperand(dst, 1, PostIndex));
   3248     mov(scratch, Operand(scratch, LSR, 8));
   3249     strb(scratch, MemOperand(dst, 1, PostIndex));
   3250     mov(scratch, Operand(scratch, LSR, 8));
   3251     strb(scratch, MemOperand(dst, 1, PostIndex));
   3252     mov(scratch, Operand(scratch, LSR, 8));
   3253     strb(scratch, MemOperand(dst, 1, PostIndex));
   3254   }
   3255   sub(length, length, Operand(kPointerSize));
   3256   b(&word_loop);
   3257 
   3258   // Copy the last bytes, if any are left.
   3259   bind(&byte_loop);
   3260   cmp(length, Operand::Zero());
   3261   b(eq, &done);
   3262   bind(&byte_loop_1);
   3263   ldrb(scratch, MemOperand(src, 1, PostIndex));
   3264   strb(scratch, MemOperand(dst, 1, PostIndex));
   3265   sub(length, length, Operand(1), SetCC);
   3266   b(ne, &byte_loop_1);
   3267   bind(&done);
   3268 }
   3269 
   3270 
   3271 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
   3272                                                 Register end_offset,
   3273                                                 Register filler) {
   3274   Label loop, entry;
   3275   b(&entry);
   3276   bind(&loop);
   3277   str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
   3278   bind(&entry);
   3279   cmp(start_offset, end_offset);
   3280   b(lt, &loop);
   3281 }
   3282 
   3283 
   3284 void MacroAssembler::CheckFor32DRegs(Register scratch) {
   3285   mov(scratch, Operand(ExternalReference::cpu_features()));
   3286   ldr(scratch, MemOperand(scratch));
   3287   tst(scratch, Operand(1u << VFP32DREGS));
   3288 }
   3289 
   3290 
   3291 void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
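          // CheckFor32DRegs leaves ne set when d16-d31 are available; those
          // registers are stored first, otherwise an equally sized gap is
          // reserved so the resulting frame layout is the same either way.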
   3292   CheckFor32DRegs(scratch);
   3293   vstm(db_w, location, d16, d31, ne);
   3294   sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
   3295   vstm(db_w, location, d0, d15);
   3296 }
   3297 
   3298 
   3299 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
   3300   CheckFor32DRegs(scratch);
   3301   vldm(ia_w, location, d0, d15);
   3302   vldm(ia_w, location, d16, d31, ne);
   3303   add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
   3304 }
   3305 
   3306 
   3307 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
   3308     Register first,
   3309     Register second,
   3310     Register scratch1,
   3311     Register scratch2,
   3312     Label* failure) {
   3313   const int kFlatAsciiStringMask =
   3314       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   3315   const int kFlatAsciiStringTag =
   3316       kStringTag | kOneByteStringTag | kSeqStringTag;
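          // An instance type passes only if it is a string (kIsNotStringMask
          // clear) that is both one-byte encoded and sequential; anything else
          // branches to failure.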
   3317   and_(scratch1, first, Operand(kFlatAsciiStringMask));
   3318   and_(scratch2, second, Operand(kFlatAsciiStringMask));
   3319   cmp(scratch1, Operand(kFlatAsciiStringTag));
   3320   // Ignore second test if first test failed.
   3321   cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
   3322   b(ne, failure);
   3323 }
   3324 
   3325 
   3326 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
   3327                                                             Register scratch,
   3328                                                             Label* failure) {
   3329   const int kFlatAsciiStringMask =
   3330       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   3331   const int kFlatAsciiStringTag =
   3332       kStringTag | kOneByteStringTag | kSeqStringTag;
   3333   and_(scratch, type, Operand(kFlatAsciiStringMask));
   3334   cmp(scratch, Operand(kFlatAsciiStringTag));
   3335   b(ne, failure);
   3336 }
   3337 
   3338 static const int kRegisterPassedArguments = 4;
   3339 
   3340 
   3341 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
   3342                                               int num_double_arguments) {
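          // Worked example (illustrative, soft-float ABI): 3 integer arguments
          // and 2 doubles become 3 + 2 * 2 = 7 register-sized arguments, of
          // which 7 - 4 = 3 must be passed on the stack.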
   3343   int stack_passed_words = 0;
   3344   if (use_eabi_hardfloat()) {
   3345     // In the hard floating point calling convention, we can use
   3346     // all double registers to pass doubles.
   3347     if (num_double_arguments > DoubleRegister::NumRegisters()) {
   3348       stack_passed_words +=
   3349           2 * (num_double_arguments - DoubleRegister::NumRegisters());
   3350     }
   3351   } else {
   3352     // In the soft floating point calling convention, every double
   3353     // argument is passed using two registers.
   3354     num_reg_arguments += 2 * num_double_arguments;
   3355   }
   3356   // Up to four simple arguments are passed in registers r0..r3.
   3357   if (num_reg_arguments > kRegisterPassedArguments) {
   3358     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
   3359   }
   3360   return stack_passed_words;
   3361 }
   3362 
   3363 
   3364 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
   3365                                           int num_double_arguments,
   3366                                           Register scratch) {
   3367   int frame_alignment = ActivationFrameAlignment();
   3368   int stack_passed_arguments = CalculateStackPassedWords(
   3369       num_reg_arguments, num_double_arguments);
   3370   if (frame_alignment > kPointerSize) {
   3371     // Make stack end at alignment and make room for the stack-passed
   3372     // arguments and the original value of sp.
   3373     mov(scratch, sp);
   3374     sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
   3375     ASSERT(IsPowerOf2(frame_alignment));
   3376     and_(sp, sp, Operand(-frame_alignment));
   3377     str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
   3378   } else {
   3379     sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   3380   }
   3381 }
   3382 
   3383 
   3384 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
   3385                                           Register scratch) {
   3386   PrepareCallCFunction(num_reg_arguments, 0, scratch);
   3387 }
   3388 
   3389 
   3390 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
   3391   if (use_eabi_hardfloat()) {
   3392     Move(d0, dreg);
   3393   } else {
   3394     vmov(r0, r1, dreg);
   3395   }
   3396 }
   3397 
   3398 
   3399 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
   3400                                              DwVfpRegister dreg2) {
   3401   if (use_eabi_hardfloat()) {
   3402     if (dreg2.is(d0)) {
   3403       ASSERT(!dreg1.is(d1));
   3404       Move(d1, dreg2);
   3405       Move(d0, dreg1);
   3406     } else {
   3407       Move(d0, dreg1);
   3408       Move(d1, dreg2);
   3409     }
   3410   } else {
   3411     vmov(r0, r1, dreg1);
   3412     vmov(r2, r3, dreg2);
   3413   }
   3414 }
   3415 
   3416 
   3417 void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
   3418                                              Register reg) {
   3419   if (use_eabi_hardfloat()) {
   3420     Move(d0, dreg);
   3421     Move(r0, reg);
   3422   } else {
   3423     Move(r2, reg);
   3424     vmov(r0, r1, dreg);
   3425   }
   3426 }
   3427 
   3428 
   3429 void MacroAssembler::CallCFunction(ExternalReference function,
   3430                                    int num_reg_arguments,
   3431                                    int num_double_arguments) {
   3432   mov(ip, Operand(function));
   3433   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
   3434 }
   3435 
   3436 
   3437 void MacroAssembler::CallCFunction(Register function,
   3438                                    int num_reg_arguments,
   3439                                    int num_double_arguments) {
   3440   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
   3441 }
   3442 
   3443 
   3444 void MacroAssembler::CallCFunction(ExternalReference function,
   3445                                    int num_arguments) {
   3446   CallCFunction(function, num_arguments, 0);
   3447 }
   3448 
   3449 
   3450 void MacroAssembler::CallCFunction(Register function,
   3451                                    int num_arguments) {
   3452   CallCFunction(function, num_arguments, 0);
   3453 }
   3454 
   3455 
   3456 void MacroAssembler::CallCFunctionHelper(Register function,
   3457                                          int num_reg_arguments,
   3458                                          int num_double_arguments) {
   3459   ASSERT(has_frame());
   3460   // Make sure that the stack is aligned before calling a C function unless
   3461   // running in the simulator. The simulator has its own alignment check which
   3462   // provides more information.
   3463 #if V8_HOST_ARCH_ARM
   3464   if (emit_debug_code()) {
   3465     int frame_alignment = OS::ActivationFrameAlignment();
   3466     int frame_alignment_mask = frame_alignment - 1;
   3467     if (frame_alignment > kPointerSize) {
   3468       ASSERT(IsPowerOf2(frame_alignment));
   3469       Label alignment_as_expected;
   3470       tst(sp, Operand(frame_alignment_mask));
   3471       b(eq, &alignment_as_expected);
   3472       // Don't use Check here, as it will call Runtime_Abort and
   3473       // possibly re-enter here.
   3474       stop("Unexpected alignment");
   3475       bind(&alignment_as_expected);
   3476     }
   3477   }
   3478 #endif
   3479 
   3480   // Just call directly. The function called cannot cause a GC, or
   3481   // allow preemption, so the return address in the link register
   3482   // stays correct.
   3483   Call(function);
   3484   int stack_passed_arguments = CalculateStackPassedWords(
   3485       num_reg_arguments, num_double_arguments);
   3486   if (ActivationFrameAlignment() > kPointerSize) {
   3487     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   3488   } else {
   3489     add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   3490   }
   3491 }
   3492 
   3493 
   3494 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
   3495                                Register result) {
   3496   const uint32_t kLdrOffsetMask = (1 << 12) - 1;
   3497   const int32_t kPCRegOffset = 2 * kPointerSize;
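          // A pc-relative ldr encodes a 12-bit positive offset from pc + 8;
          // kPCRegOffset accounts for that pipeline offset when the constant's
          // address is reconstructed below.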
   3498   ldr(result, MemOperand(ldr_location));
   3499   if (emit_debug_code()) {
   3500     // Check that the instruction is a ldr reg, [pc + offset].
   3501     and_(result, result, Operand(kLdrPCPattern));
   3502     cmp(result, Operand(kLdrPCPattern));
   3503     Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
   3504     // Result was clobbered. Restore it.
   3505     ldr(result, MemOperand(ldr_location));
   3506   }
   3507   // Get the address of the constant.
   3508   and_(result, result, Operand(kLdrOffsetMask));
   3509   add(result, ldr_location, Operand(result));
   3510   add(result, result, Operand(kPCRegOffset));
   3511 }
   3512 
   3513 
   3514 void MacroAssembler::CheckPageFlag(
   3515     Register object,
   3516     Register scratch,
   3517     int mask,
   3518     Condition cc,
   3519     Label* condition_met) {
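          // Clearing the low kPageSizeBits of the object address yields the
          // containing MemoryChunk, whose flags word is then tested against
          // the given mask.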
   3520   Bfc(scratch, object, 0, kPageSizeBits);
   3521   ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   3522   tst(scratch, Operand(mask));
   3523   b(cc, condition_met);
   3524 }
   3525 
   3526 
   3527 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
   3528                                         Register scratch,
   3529                                         Label* if_deprecated) {
   3530   if (map->CanBeDeprecated()) {
   3531     mov(scratch, Operand(map));
   3532     ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
   3533     tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
   3534     b(ne, if_deprecated);
   3535   }
   3536 }
   3537 
   3538 
   3539 void MacroAssembler::JumpIfBlack(Register object,
   3540                                  Register scratch0,
   3541                                  Register scratch1,
   3542                                  Label* on_black) {
   3543   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
   3544   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   3545 }
   3546 
   3547 
   3548 void MacroAssembler::HasColor(Register object,
   3549                               Register bitmap_scratch,
   3550                               Register mask_scratch,
   3551                               Label* has_color,
   3552                               int first_bit,
   3553                               int second_bit) {
   3554   ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
   3555 
   3556   GetMarkBits(object, bitmap_scratch, mask_scratch);
   3557 
   3558   Label other_color, word_boundary;
   3559   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
   3560   tst(ip, Operand(mask_scratch));
   3561   b(first_bit == 1 ? eq : ne, &other_color);
   3562   // Shift left 1 by adding.
   3563   add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
   3564   b(eq, &word_boundary);
   3565   tst(ip, Operand(mask_scratch));
   3566   b(second_bit == 1 ? ne : eq, has_color);
   3567   jmp(&other_color);
   3568 
   3569   bind(&word_boundary);
   3570   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
   3571   tst(ip, Operand(1));
   3572   b(second_bit == 1 ? ne : eq, has_color);
   3573   bind(&other_color);
   3574 }
   3575 
   3576 
   3577 // Detect some, but not all, common pointer-free objects.  This is used by the
   3578 // incremental write barrier which doesn't care about oddballs (they are always
   3579 // marked black immediately so this code is not hit).
   3580 void MacroAssembler::JumpIfDataObject(Register value,
   3581                                       Register scratch,
   3582                                       Label* not_data_object) {
   3583   Label is_data_object;
   3584   ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
   3585   CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
   3586   b(eq, &is_data_object);
   3587   ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
   3588   ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   3589   // If it's a string and it's not a cons string then it's an object containing
   3590   // no GC pointers.
   3591   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   3592   tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
   3593   b(ne, not_data_object);
   3594   bind(&is_data_object);
   3595 }
   3596 
   3597 
   3598 void MacroAssembler::GetMarkBits(Register addr_reg,
   3599                                  Register bitmap_reg,
   3600                                  Register mask_reg) {
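          // Sketch of the address decomposition (illustrative only):
          //   bitmap_reg = page start of addr_reg;
          //   bit index  = (addr_reg >> kPointerSizeLog2) % Bitmap::kBitsPerCell;
          //   cell index = the address bits above that, used as a word offset
          //                into the page's mark bitmap;
          //   mask_reg   = 1 << bit index.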
   3601   ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
   3602   and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
   3603   Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
   3604   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
   3605   Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
   3606   add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
   3607   mov(ip, Operand(1));
   3608   mov(mask_reg, Operand(ip, LSL, mask_reg));
   3609 }
   3610 
   3611 
   3612 void MacroAssembler::EnsureNotWhite(
   3613     Register value,
   3614     Register bitmap_scratch,
   3615     Register mask_scratch,
   3616     Register load_scratch,
   3617     Label* value_is_white_and_not_data) {
   3618   ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
   3619   GetMarkBits(value, bitmap_scratch, mask_scratch);
   3620 
   3621   // If the value is black or grey we don't need to do anything.
   3622   ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   3623   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   3624   ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
   3625   ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
   3626 
   3627   Label done;
   3628 
   3629   // Since both black and grey have a 1 in the first position and white does
   3630   // not have a 1 there, we only need to check one bit.
   3631   ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
   3632   tst(mask_scratch, load_scratch);
   3633   b(ne, &done);
   3634 
   3635   if (emit_debug_code()) {
   3636     // Check for impossible bit pattern.
   3637     Label ok;
   3638     // LSL may overflow, making the check conservative.
   3639     tst(load_scratch, Operand(mask_scratch, LSL, 1));
   3640     b(eq, &ok);
   3641     stop("Impossible marking bit pattern");
   3642     bind(&ok);
   3643   }
   3644 
   3645   // Value is white.  We check whether it is data that doesn't need scanning.
   3646   // Currently only checks for HeapNumber and non-cons strings.
   3647   Register map = load_scratch;  // Holds map while checking type.
   3648   Register length = load_scratch;  // Holds length of object after testing type.
   3649   Label is_data_object;
   3650 
   3651   // Check for heap-number
   3652   ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
   3653   CompareRoot(map, Heap::kHeapNumberMapRootIndex);
   3654   mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
   3655   b(eq, &is_data_object);
   3656 
   3657   // Check for strings.
   3658   ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
   3659   ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   3660   // If it's a string and it's not a cons string then it's an object containing
   3661   // no GC pointers.
   3662   Register instance_type = load_scratch;
   3663   ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
   3664   tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
   3665   b(ne, value_is_white_and_not_data);
   3666   // It's a non-indirect (non-cons and non-slice) string.
   3667   // If it's external, the length is just ExternalString::kSize.
   3668   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
   3669   // External strings are the only ones with the kExternalStringTag bit
   3670   // set.
   3671   ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
   3672   ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
   3673   tst(instance_type, Operand(kExternalStringTag));
   3674   mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
   3675   b(ne, &is_data_object);
   3676 
   3677   // Sequential string, either ASCII or UC16.
   3678   // For ASCII (char-size of 1) we shift the smi tag away to get the length.
   3679   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
   3680   // getting the length multiplied by 2.
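          // E.g. (illustrative) a 5-character string yields 5 payload bytes if
          // one-byte encoded and 10 if two-byte; SeqString::kHeaderSize is then
          // added and the total rounded up to the object alignment below.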
   3681   ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
   3682   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   3683   ldr(ip, FieldMemOperand(value, String::kLengthOffset));
   3684   tst(instance_type, Operand(kStringEncodingMask));
   3685   mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
   3686   add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
   3687   and_(length, length, Operand(~kObjectAlignmentMask));
   3688 
   3689   bind(&is_data_object);
   3690   // Value is a data object, and it is white.  Mark it black.  Since we know
   3691   // that the object is white we can make it black by flipping one bit.
   3692   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
   3693   orr(ip, ip, Operand(mask_scratch));
   3694   str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
   3695 
   3696   and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
   3697   ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
   3698   add(ip, ip, Operand(length));
   3699   str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
   3700 
   3701   bind(&done);
   3702 }
   3703 
   3704 
   3705 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
   3706   Usat(output_reg, 8, Operand(input_reg));
   3707 }
   3708 
   3709 
   3710 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
   3711                                         DwVfpRegister input_reg,
   3712                                         LowDwVfpRegister double_scratch) {
   3713   Label above_zero;
   3714   Label done;
   3715   Label in_bounds;
   3716 
   3717   VFPCompareAndSetFlags(input_reg, 0.0);
   3718   b(gt, &above_zero);
   3719 
   3720   // Double value is less than zero, NaN or Inf, return 0.
   3721   mov(result_reg, Operand::Zero());
   3722   b(al, &done);
   3723 
   3724   // Double value is greater than zero; values above 255 return 255.
   3725   bind(&above_zero);
   3726   Vmov(double_scratch, 255.0, result_reg);
   3727   VFPCompareAndSetFlags(input_reg, double_scratch);
   3728   b(le, &in_bounds);
   3729   mov(result_reg, Operand(255));
   3730   b(al, &done);
   3731 
   3732   // In 0-255 range, round and truncate.
   3733   bind(&in_bounds);
   3734   // Save FPSCR.
   3735   vmrs(ip);
   3736   // Set rounding mode to round to the nearest integer by clearing bits[23:22].
   3737   bic(result_reg, ip, Operand(kVFPRoundingModeMask));
   3738   vmsr(result_reg);
   3739   vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
   3740   vmov(result_reg, double_scratch.low());
   3741   // Restore FPSCR.
   3742   vmsr(ip);
   3743   bind(&done);
   3744 }
   3745 
   3746 
   3747 void MacroAssembler::LoadInstanceDescriptors(Register map,
   3748                                              Register descriptors) {
   3749   ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
   3750 }
   3751 
   3752 
   3753 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
   3754   ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
   3755   DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
   3756 }
   3757 
   3758 
   3759 void MacroAssembler::EnumLength(Register dst, Register map) {
   3760   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
   3761   ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
   3762   and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
   3763 }
   3764 
   3765 
   3766 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
   3767   Register  empty_fixed_array_value = r6;
   3768   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   3769   Label next, start;
   3770   mov(r2, r0);
   3771 
   3772   // Check if the enum length field is properly initialized, indicating that
   3773   // there is an enum cache.
   3774   ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
   3775 
   3776   EnumLength(r3, r1);
   3777   cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
   3778   b(eq, call_runtime);
   3779 
   3780   jmp(&start);
   3781 
   3782   bind(&next);
   3783   ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
   3784 
   3785   // For all objects but the receiver, check that the cache is empty.
   3786   EnumLength(r3, r1);
   3787   cmp(r3, Operand(Smi::FromInt(0)));
   3788   b(ne, call_runtime);
   3789 
   3790   bind(&start);
   3791 
   3792   // Check that there are no elements. Register r2 contains the current JS
   3793   // object we've reached through the prototype chain.
   3794   ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
   3795   cmp(r2, empty_fixed_array_value);
   3796   b(ne, call_runtime);
   3797 
   3798   ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
   3799   cmp(r2, null_value);
   3800   b(ne, &next);
   3801 }
   3802 
   3803 
   3804 void MacroAssembler::TestJSArrayForAllocationMemento(
   3805     Register receiver_reg,
   3806     Register scratch_reg) {
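          // An AllocationMemento, if present, sits immediately after the
          // JSArray; only new-space objects can have one, hence the checks
          // against new_space_start and the current allocation top before the
          // candidate memento's map word is compared.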
   3807   Label no_memento_available;
   3808   ExternalReference new_space_start =
   3809       ExternalReference::new_space_start(isolate());
   3810   ExternalReference new_space_allocation_top =
   3811       ExternalReference::new_space_allocation_top_address(isolate());
   3812   add(scratch_reg, receiver_reg,
   3813       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
   3814   cmp(scratch_reg, Operand(new_space_start));
   3815   b(lt, &no_memento_available);
   3816   mov(ip, Operand(new_space_allocation_top));
   3817   ldr(ip, MemOperand(ip));
   3818   cmp(scratch_reg, ip);
   3819   b(gt, &no_memento_available);
   3820   ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
   3821   cmp(scratch_reg,
   3822       Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
   3823   bind(&no_memento_available);
   3824 }
   3825 
   3826 
   3827 #ifdef DEBUG
   3828 bool AreAliased(Register reg1,
   3829                 Register reg2,
   3830                 Register reg3,
   3831                 Register reg4,
   3832                 Register reg5,
   3833                 Register reg6) {
   3834   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
   3835     reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
   3836 
   3837   RegList regs = 0;
   3838   if (reg1.is_valid()) regs |= reg1.bit();
   3839   if (reg2.is_valid()) regs |= reg2.bit();
   3840   if (reg3.is_valid()) regs |= reg3.bit();
   3841   if (reg4.is_valid()) regs |= reg4.bit();
   3842   if (reg5.is_valid()) regs |= reg5.bit();
   3843   if (reg6.is_valid()) regs |= reg6.bit();
   3844   int n_of_non_aliasing_regs = NumRegs(regs);
   3845 
   3846   return n_of_valid_regs != n_of_non_aliasing_regs;
   3847 }
   3848 #endif
   3849 
   3850 
   3851 CodePatcher::CodePatcher(byte* address, int instructions)
   3852     : address_(address),
   3853       size_(instructions * Assembler::kInstrSize),
   3854       masm_(NULL, address, size_ + Assembler::kGap) {
   3855   // Create a new macro assembler pointing to the address of the code to patch.
   3856   // The size is adjusted with kGap in order for the assembler to generate size
   3857   // bytes of instructions without failing with buffer size constraints.
   3858   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   3859 }
   3860 
   3861 
   3862 CodePatcher::~CodePatcher() {
   3863   // Indicate that code has changed.
   3864   CPU::FlushICache(address_, size_);
   3865 
   3866   // Check that the code was patched as expected.
   3867   ASSERT(masm_.pc_ == address_ + size_);
   3868   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   3869 }
   3870 
   3871 
   3872 void CodePatcher::Emit(Instr instr) {
   3873   masm()->emit(instr);
   3874 }
   3875 
   3876 
   3877 void CodePatcher::Emit(Address addr) {
   3878   masm()->emit(reinterpret_cast<Instr>(addr));
   3879 }
   3880 
   3881 
   3882 void CodePatcher::EmitCondition(Condition cond) {
   3883   Instr instr = Assembler::instr_at(masm_.pc_);
   3884   instr = (instr & ~kCondMask) | cond;
   3885   masm_.emit(instr);
   3886 }
   3887 
   3888 
   3889 } }  // namespace v8::internal
   3890 
   3891 #endif  // V8_TARGET_ARCH_ARM
   3892