      1 // Copyright 2011 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include <limits.h>  // For LONG_MIN, LONG_MAX.
     29 
     30 #include "v8.h"
     31 
     32 #if defined(V8_TARGET_ARCH_ARM)
     33 
     34 #include "bootstrapper.h"
     35 #include "codegen.h"
     36 #include "debug.h"
     37 #include "runtime.h"
     38 
     39 namespace v8 {
     40 namespace internal {
     41 
     42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     43     : Assembler(arg_isolate, buffer, size),
     44       generating_stub_(false),
     45       allow_stub_calls_(true) {
     46   if (isolate() != NULL) {
     47     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
     48                                   isolate());
     49   }
     50 }
     51 
     52 
      53 // We always generate ARM code, never Thumb code, even if V8 is compiled to
      54 // Thumb, so we require inter-working support.
     55 #if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
     56 #error "flag -mthumb-interwork missing"
     57 #endif
     58 
     59 
      60 // We do not support Thumb inter-working with an ARM architecture that lacks
      61 // the blx instruction (below v5t).  If you know which CPU you are compiling
      62 // for, you can use -march=armv7 or similar.
     63 #if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
     64 # error "For thumb inter-working we require an architecture which supports blx"
     65 #endif
     66 
     67 
     68 // Using bx does not yield better code, so use it only when required
     69 #if defined(USE_THUMB_INTERWORK)
     70 #define USE_BX 1
     71 #endif
     72 
     73 
     74 void MacroAssembler::Jump(Register target, Condition cond) {
     75 #if USE_BX
     76   bx(target, cond);
     77 #else
     78   mov(pc, Operand(target), LeaveCC, cond);
     79 #endif
     80 }
     81 
     82 
     83 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
     84                           Condition cond) {
     85 #if USE_BX
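           // bx takes a register operand, so materialize the target address in ip
           // before branching.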
     86   mov(ip, Operand(target, rmode), LeaveCC, cond);
     87   bx(ip, cond);
     88 #else
     89   mov(pc, Operand(target, rmode), LeaveCC, cond);
     90 #endif
     91 }
     92 
     93 
     94 void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
     95                           Condition cond) {
     96   ASSERT(!RelocInfo::IsCodeTarget(rmode));
     97   Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
     98 }
     99 
    100 
    101 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
    102                           Condition cond) {
    103   ASSERT(RelocInfo::IsCodeTarget(rmode));
    104   // 'code' is always generated ARM code, never THUMB code
    105   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
    106 }
    107 
    108 
    109 int MacroAssembler::CallSize(Register target, Condition cond) {
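           // With blx the call is a single instruction; without it, Call() below
           // emits the two-instruction mov lr / mov pc sequence.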
    110 #if USE_BLX
    111   return kInstrSize;
    112 #else
    113   return 2 * kInstrSize;
    114 #endif
    115 }
    116 
    117 
    118 void MacroAssembler::Call(Register target, Condition cond) {
    119   // Block constant pool for the call instruction sequence.
    120   BlockConstPoolScope block_const_pool(this);
    121 #ifdef DEBUG
    122   int pre_position = pc_offset();
    123 #endif
    124 
    125 #if USE_BLX
    126   blx(target, cond);
    127 #else
     128   // Set lr for return at current pc + 8.
    129   mov(lr, Operand(pc), LeaveCC, cond);
    130   mov(pc, Operand(target), LeaveCC, cond);
    131 #endif
    132 
    133 #ifdef DEBUG
    134   int post_position = pc_offset();
    135   CHECK_EQ(pre_position + CallSize(target, cond), post_position);
    136 #endif
    137 }
    138 
    139 
    140 int MacroAssembler::CallSize(
    141     intptr_t target, RelocInfo::Mode rmode, Condition cond) {
    142   int size = 2 * kInstrSize;
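           // Two instructions for the basic call sequence; one more if loading the
           // target cannot be encoded as a single instruction.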
    143   Instr mov_instr = cond | MOV | LeaveCC;
    144   if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
    145     size += kInstrSize;
    146   }
    147   return size;
    148 }
    149 
    150 
    151 void MacroAssembler::Call(
    152     intptr_t target, RelocInfo::Mode rmode, Condition cond) {
    153   // Block constant pool for the call instruction sequence.
    154   BlockConstPoolScope block_const_pool(this);
    155 #ifdef DEBUG
    156   int pre_position = pc_offset();
    157 #endif
    158 
    159 #if USE_BLX
     160   // On ARMv5 and later, the recommended call sequence is:
    161   //  ldr ip, [pc, #...]
    162   //  blx ip
    163 
     164   // Statement positions are expected to be recorded when the target
     165   // address is loaded. The mov method automatically records positions
     166   // when pc is the target; since that is not the case here, we have to
     167   // record them explicitly.
    168   positions_recorder()->WriteRecordedPositions();
    169 
    170   mov(ip, Operand(target, rmode), LeaveCC, cond);
    171   blx(ip, cond);
    172 
    173   ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
    174 #else
    175   // Set lr for return at current pc + 8.
    176   mov(lr, Operand(pc), LeaveCC, cond);
    177   // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
    178   mov(pc, Operand(target, rmode), LeaveCC, cond);
    179   ASSERT(kCallTargetAddressOffset == kInstrSize);
    180 #endif
    181 
    182 #ifdef DEBUG
    183   int post_position = pc_offset();
    184   CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
    185 #endif
    186 }
    187 
    188 
    189 int MacroAssembler::CallSize(
    190     byte* target, RelocInfo::Mode rmode, Condition cond) {
    191   return CallSize(reinterpret_cast<intptr_t>(target), rmode);
    192 }
    193 
    194 
    195 void MacroAssembler::Call(
    196     byte* target, RelocInfo::Mode rmode, Condition cond) {
    197 #ifdef DEBUG
    198   int pre_position = pc_offset();
    199 #endif
    200 
    201   ASSERT(!RelocInfo::IsCodeTarget(rmode));
    202   Call(reinterpret_cast<intptr_t>(target), rmode, cond);
    203 
    204 #ifdef DEBUG
    205   int post_position = pc_offset();
    206   CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
    207 #endif
    208 }
    209 
    210 
    211 int MacroAssembler::CallSize(
    212     Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
    213   return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
    214 }
    215 
    216 
    217 void MacroAssembler::Call(
    218     Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
    219 #ifdef DEBUG
    220   int pre_position = pc_offset();
    221 #endif
    222 
    223   ASSERT(RelocInfo::IsCodeTarget(rmode));
    224   // 'code' is always generated ARM code, never THUMB code
    225   Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
    226 
    227 #ifdef DEBUG
    228   int post_position = pc_offset();
    229   CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
    230 #endif
    231 }
    232 
    233 
    234 void MacroAssembler::Ret(Condition cond) {
    235 #if USE_BX
    236   bx(lr, cond);
    237 #else
    238   mov(pc, Operand(lr), LeaveCC, cond);
    239 #endif
    240 }
    241 
    242 
    243 void MacroAssembler::Drop(int count, Condition cond) {
    244   if (count > 0) {
    245     add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
    246   }
    247 }
    248 
    249 
    250 void MacroAssembler::Ret(int drop, Condition cond) {
    251   Drop(drop, cond);
    252   Ret(cond);
    253 }
    254 
    255 
    256 void MacroAssembler::Swap(Register reg1,
    257                           Register reg2,
    258                           Register scratch,
    259                           Condition cond) {
    260   if (scratch.is(no_reg)) {
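             // No scratch register was provided: swap the two registers in place
             // using the classic three-eor (XOR swap) sequence.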
    261     eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    262     eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    263     eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    264   } else {
    265     mov(scratch, reg1, LeaveCC, cond);
    266     mov(reg1, reg2, LeaveCC, cond);
    267     mov(reg2, scratch, LeaveCC, cond);
    268   }
    269 }
    270 
    271 
    272 void MacroAssembler::Call(Label* target) {
    273   bl(target);
    274 }
    275 
    276 
    277 void MacroAssembler::Move(Register dst, Handle<Object> value) {
    278   mov(dst, Operand(value));
    279 }
    280 
    281 
    282 void MacroAssembler::Move(Register dst, Register src) {
    283   if (!dst.is(src)) {
    284     mov(dst, src);
    285   }
    286 }
    287 
    288 
    289 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
    290                          Condition cond) {
    291   if (!src2.is_reg() &&
    292       !src2.must_use_constant_pool() &&
    293       src2.immediate() == 0) {
    294     mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
    295 
    296   } else if (!src2.is_single_instruction() &&
    297              !src2.must_use_constant_pool() &&
    298              CpuFeatures::IsSupported(ARMv7) &&
    299              IsPowerOf2(src2.immediate() + 1)) {
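             // The immediate has the form 2^n - 1, a contiguous low-bit mask, so the
             // and can be done as an unsigned bit-field extract of n bits.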
    300     ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
    301 
    302   } else {
    303     and_(dst, src1, src2, LeaveCC, cond);
    304   }
    305 }
    306 
    307 
    308 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
    309                           Condition cond) {
    310   ASSERT(lsb < 32);
    311   if (!CpuFeatures::IsSupported(ARMv7)) {
    312     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    313     and_(dst, src1, Operand(mask), LeaveCC, cond);
    314     if (lsb != 0) {
    315       mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    316     }
    317   } else {
    318     ubfx(dst, src1, lsb, width, cond);
    319   }
    320 }
    321 
    322 
    323 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
    324                           Condition cond) {
    325   ASSERT(lsb < 32);
    326   if (!CpuFeatures::IsSupported(ARMv7)) {
    327     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    328     and_(dst, src1, Operand(mask), LeaveCC, cond);
    329     int shift_up = 32 - lsb - width;
    330     int shift_down = lsb + shift_up;
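             // Shift the field up so that its sign bit lands in bit 31, then shift it
             // arithmetically back down to sign-extend it into the low bits.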
    331     if (shift_up != 0) {
    332       mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    333     }
    334     if (shift_down != 0) {
    335       mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    336     }
    337   } else {
    338     sbfx(dst, src1, lsb, width, cond);
    339   }
    340 }
    341 
    342 
    343 void MacroAssembler::Bfi(Register dst,
    344                          Register src,
    345                          Register scratch,
    346                          int lsb,
    347                          int width,
    348                          Condition cond) {
    349   ASSERT(0 <= lsb && lsb < 32);
    350   ASSERT(0 <= width && width < 32);
    351   ASSERT(lsb + width < 32);
    352   ASSERT(!scratch.is(dst));
    353   if (width == 0) return;
    354   if (!CpuFeatures::IsSupported(ARMv7)) {
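             // Without bfi: clear the destination bit-field, then mask the source to
             // 'width' bits, shift it into position and or it into the destination.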
    355     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    356     bic(dst, dst, Operand(mask));
    357     and_(scratch, src, Operand((1 << width) - 1));
    358     mov(scratch, Operand(scratch, LSL, lsb));
    359     orr(dst, dst, scratch);
    360   } else {
    361     bfi(dst, src, lsb, width, cond);
    362   }
    363 }
    364 
    365 
    366 void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
    367   ASSERT(lsb < 32);
    368   if (!CpuFeatures::IsSupported(ARMv7)) {
    369     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    370     bic(dst, dst, Operand(mask));
    371   } else {
    372     bfc(dst, lsb, width, cond);
    373   }
    374 }
    375 
    376 
    377 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
    378                           Condition cond) {
    379   if (!CpuFeatures::IsSupported(ARMv7)) {
    380     ASSERT(!dst.is(pc) && !src.rm().is(pc));
    381     ASSERT((satpos >= 0) && (satpos <= 31));
    382 
    383     // These asserts are required to ensure compatibility with the ARMv7
    384     // implementation.
    385     ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    386     ASSERT(src.rs().is(no_reg));
    387 
    388     Label done;
    389     int satval = (1 << satpos) - 1;
    390 
    391     if (cond != al) {
    392       b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    393     }
    394     if (!(src.is_reg() && dst.is(src.rm()))) {
    395       mov(dst, src);
    396     }
    397     tst(dst, Operand(~satval));
    398     b(eq, &done);
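             // Some bit outside the saturated range is set: clamp negative values
             // (mi) to 0 and positive overflows (pl) to satval.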
    399     mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
    400     mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    401     bind(&done);
    402   } else {
    403     usat(dst, satpos, src, cond);
    404   }
    405 }
    406 
    407 
    408 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
    409   // Empty the const pool.
    410   CheckConstPool(true, true);
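           // Reading pc yields the address of the add plus 8, so adding the scaled
           // Smi index branches into the table of b instructions emitted below.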
    411   add(pc, pc, Operand(index,
    412                       LSL,
    413                       Instruction::kInstrSizeLog2 - kSmiTagSize));
    414   BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
    415   nop();  // Jump table alignment.
    416   for (int i = 0; i < targets.length(); i++) {
    417     b(targets[i]);
    418   }
    419 }
    420 
    421 
    422 void MacroAssembler::LoadRoot(Register destination,
    423                               Heap::RootListIndex index,
    424                               Condition cond) {
    425   ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
    426 }
    427 
    428 
    429 void MacroAssembler::StoreRoot(Register source,
    430                                Heap::RootListIndex index,
    431                                Condition cond) {
    432   str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
    433 }
    434 
    435 
    436 void MacroAssembler::RecordWriteHelper(Register object,
    437                                        Register address,
    438                                        Register scratch) {
    439   if (emit_debug_code()) {
    440     // Check that the object is not in new space.
    441     Label not_in_new_space;
    442     InNewSpace(object, scratch, ne, &not_in_new_space);
    443     Abort("new-space object passed to RecordWriteHelper");
    444     bind(&not_in_new_space);
    445   }
    446 
    447   // Calculate page address.
    448   Bfc(object, 0, kPageSizeBits);
    449 
    450   // Calculate region number.
    451   Ubfx(address, address, Page::kRegionSizeLog2,
    452        kPageSizeBits - Page::kRegionSizeLog2);
    453 
    454   // Mark region dirty.
    455   ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
    456   mov(ip, Operand(1));
    457   orr(scratch, scratch, Operand(ip, LSL, address));
    458   str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
    459 }
    460 
    461 
    462 void MacroAssembler::InNewSpace(Register object,
    463                                 Register scratch,
    464                                 Condition cond,
    465                                 Label* branch) {
    466   ASSERT(cond == eq || cond == ne);
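           // An object is in new space if masking its address with the new-space
           // mask yields the new-space start address.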
    467   and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
    468   cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
    469   b(cond, branch);
    470 }
    471 
    472 
     473 // Will clobber 4 registers: object, scratch0, scratch1, ip.  The
    474 // register 'object' contains a heap object pointer.  The heap object
    475 // tag is shifted away.
    476 void MacroAssembler::RecordWrite(Register object,
    477                                  Operand offset,
    478                                  Register scratch0,
    479                                  Register scratch1) {
    480   // The compiled code assumes that record write doesn't change the
    481   // context register, so we check that none of the clobbered
    482   // registers are cp.
    483   ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
    484 
    485   Label done;
    486 
    487   // First, test that the object is not in the new space.  We cannot set
    488   // region marks for new space pages.
    489   InNewSpace(object, scratch0, eq, &done);
    490 
    491   // Add offset into the object.
    492   add(scratch0, object, offset);
    493 
    494   // Record the actual write.
    495   RecordWriteHelper(object, scratch0, scratch1);
    496 
    497   bind(&done);
    498 
    499   // Clobber all input registers when running with the debug-code flag
    500   // turned on to provoke errors.
    501   if (emit_debug_code()) {
    502     mov(object, Operand(BitCast<int32_t>(kZapValue)));
    503     mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    504     mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
    505   }
    506 }
    507 
    508 
    509 // Will clobber 4 registers: object, address, scratch, ip.  The
    510 // register 'object' contains a heap object pointer.  The heap object
    511 // tag is shifted away.
    512 void MacroAssembler::RecordWrite(Register object,
    513                                  Register address,
    514                                  Register scratch) {
    515   // The compiled code assumes that record write doesn't change the
    516   // context register, so we check that none of the clobbered
    517   // registers are cp.
    518   ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
    519 
    520   Label done;
    521 
    522   // First, test that the object is not in the new space.  We cannot set
    523   // region marks for new space pages.
    524   InNewSpace(object, scratch, eq, &done);
    525 
    526   // Record the actual write.
    527   RecordWriteHelper(object, address, scratch);
    528 
    529   bind(&done);
    530 
    531   // Clobber all input registers when running with the debug-code flag
    532   // turned on to provoke errors.
    533   if (emit_debug_code()) {
    534     mov(object, Operand(BitCast<int32_t>(kZapValue)));
    535     mov(address, Operand(BitCast<int32_t>(kZapValue)));
    536     mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
    537   }
    538 }
    539 
    540 
    541 // Push and pop all registers that can hold pointers.
    542 void MacroAssembler::PushSafepointRegisters() {
    543   // Safepoints expect a block of contiguous register values starting with r0:
    544   ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
    545   // Safepoints expect a block of kNumSafepointRegisters values on the
    546   // stack, so adjust the stack for unsaved registers.
    547   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
    548   ASSERT(num_unsaved >= 0);
    549   sub(sp, sp, Operand(num_unsaved * kPointerSize));
    550   stm(db_w, sp, kSafepointSavedRegisters);
    551 }
    552 
    553 
    554 void MacroAssembler::PopSafepointRegisters() {
    555   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
    556   ldm(ia_w, sp, kSafepointSavedRegisters);
    557   add(sp, sp, Operand(num_unsaved * kPointerSize));
    558 }
    559 
    560 
    561 void MacroAssembler::PushSafepointRegistersAndDoubles() {
    562   PushSafepointRegisters();
    563   sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
    564                       kDoubleSize));
    565   for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    566     vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
    567   }
    568 }
    569 
    570 
    571 void MacroAssembler::PopSafepointRegistersAndDoubles() {
    572   for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    573     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
    574   }
    575   add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
    576                       kDoubleSize));
    577   PopSafepointRegisters();
    578 }
    579 
    580 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
    581                                                              Register dst) {
    582   str(src, SafepointRegistersAndDoublesSlot(dst));
    583 }
    584 
    585 
    586 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
    587   str(src, SafepointRegisterSlot(dst));
    588 }
    589 
    590 
    591 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
    592   ldr(dst, SafepointRegisterSlot(src));
    593 }
    594 
    595 
    596 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
    597   // The registers are pushed starting with the highest encoding,
     598   // which means that the lowest encodings are closest to the stack pointer.
    599   ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
    600   return reg_code;
    601 }
    602 
    603 
    604 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
    605   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
    606 }
    607 
    608 
    609 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
    610   // General purpose registers are pushed last on the stack.
    611   int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
    612   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
    613   return MemOperand(sp, doubles_size + register_offset);
    614 }
    615 
    616 
    617 void MacroAssembler::Ldrd(Register dst1, Register dst2,
    618                           const MemOperand& src, Condition cond) {
    619   ASSERT(src.rm().is(no_reg));
    620   ASSERT(!dst1.is(lr));  // r14.
    621   ASSERT_EQ(0, dst1.code() % 2);
    622   ASSERT_EQ(dst1.code() + 1, dst2.code());
    623 
    624   // Generate two ldr instructions if ldrd is not available.
    625   if (CpuFeatures::IsSupported(ARMv7)) {
    626     CpuFeatures::Scope scope(ARMv7);
    627     ldrd(dst1, dst2, src, cond);
    628   } else {
    629     MemOperand src2(src);
    630     src2.set_offset(src2.offset() + 4);
    631     if (dst1.is(src.rn())) {
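               // The base register is also the first destination: load the second
               // word first so the base is still intact for that load.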
    632       ldr(dst2, src2, cond);
    633       ldr(dst1, src, cond);
    634     } else {
    635       ldr(dst1, src, cond);
    636       ldr(dst2, src2, cond);
    637     }
    638   }
    639 }
    640 
    641 
    642 void MacroAssembler::Strd(Register src1, Register src2,
    643                           const MemOperand& dst, Condition cond) {
    644   ASSERT(dst.rm().is(no_reg));
    645   ASSERT(!src1.is(lr));  // r14.
    646   ASSERT_EQ(0, src1.code() % 2);
    647   ASSERT_EQ(src1.code() + 1, src2.code());
    648 
    649   // Generate two str instructions if strd is not available.
    650   if (CpuFeatures::IsSupported(ARMv7)) {
    651     CpuFeatures::Scope scope(ARMv7);
    652     strd(src1, src2, dst, cond);
    653   } else {
    654     MemOperand dst2(dst);
    655     dst2.set_offset(dst2.offset() + 4);
    656     str(src1, dst, cond);
    657     str(src2, dst2, cond);
    658   }
    659 }
    660 
    661 
    662 void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
    663                                     const Register scratch,
    664                                     const Condition cond) {
    665   vmrs(scratch, cond);
    666   bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
    667   vmsr(scratch, cond);
    668 }
    669 
    670 
    671 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
    672                                            const DwVfpRegister src2,
    673                                            const Condition cond) {
    674   // Compare and move FPSCR flags to the normal condition flags.
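           // Passing pc as the flags register makes vmrs copy the FPSCR condition
           // flags into the APSR.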
    675   VFPCompareAndLoadFlags(src1, src2, pc, cond);
    676 }
    677 
    678 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
    679                                            const double src2,
    680                                            const Condition cond) {
    681   // Compare and move FPSCR flags to the normal condition flags.
    682   VFPCompareAndLoadFlags(src1, src2, pc, cond);
    683 }
    684 
    685 
    686 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
    687                                             const DwVfpRegister src2,
    688                                             const Register fpscr_flags,
    689                                             const Condition cond) {
    690   // Compare and load FPSCR.
    691   vcmp(src1, src2, cond);
    692   vmrs(fpscr_flags, cond);
    693 }
    694 
    695 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
    696                                             const double src2,
    697                                             const Register fpscr_flags,
    698                                             const Condition cond) {
    699   // Compare and load FPSCR.
    700   vcmp(src1, src2, cond);
    701   vmrs(fpscr_flags, cond);
    702 }
    703 
    704 
    705 void MacroAssembler::EnterFrame(StackFrame::Type type) {
    706   // r0-r3: preserved
    707   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
    708   mov(ip, Operand(Smi::FromInt(type)));
    709   push(ip);
    710   mov(ip, Operand(CodeObject()));
    711   push(ip);
    712   add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
    713 }
    714 
    715 
    716 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
    717   // r0: preserved
    718   // r1: preserved
    719   // r2: preserved
    720 
    721   // Drop the execution stack down to the frame pointer and restore
    722   // the caller frame pointer and return address.
    723   mov(sp, fp);
    724   ldm(ia_w, sp, fp.bit() | lr.bit());
    725 }
    726 
    727 
    728 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
     729   // Set up the frame structure on the stack.
    730   ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
    731   ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
    732   ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
    733   Push(lr, fp);
     734   mov(fp, Operand(sp));  // Set up the new frame pointer.
    735   // Reserve room for saved entry sp and code object.
    736   sub(sp, sp, Operand(2 * kPointerSize));
    737   if (emit_debug_code()) {
    738     mov(ip, Operand(0));
    739     str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
    740   }
    741   mov(ip, Operand(CodeObject()));
    742   str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
    743 
    744   // Save the frame pointer and the context in top.
    745   mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
    746   str(fp, MemOperand(ip));
    747   mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
    748   str(cp, MemOperand(ip));
    749 
    750   // Optionally save all double registers.
    751   if (save_doubles) {
    752     DwVfpRegister first = d0;
    753     DwVfpRegister last =
    754         DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    755     vstm(db_w, sp, first, last);
    756     // Note that d0 will be accessible at
    757     //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
    758     // since the sp slot and code slot were pushed after the fp.
    759   }
    760 
     761   // Reserve room for the return address and the requested stack space, and
     762   // align the frame in preparation for calling the runtime function.
    763   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
    764   sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
    765   if (frame_alignment > 0) {
    766     ASSERT(IsPowerOf2(frame_alignment));
    767     and_(sp, sp, Operand(-frame_alignment));
    768   }
    769 
    770   // Set the exit frame sp value to point just before the return address
    771   // location.
    772   add(ip, sp, Operand(kPointerSize));
    773   str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
    774 }
    775 
    776 
    777 void MacroAssembler::InitializeNewString(Register string,
    778                                          Register length,
    779                                          Heap::RootListIndex map_index,
    780                                          Register scratch1,
    781                                          Register scratch2) {
    782   mov(scratch1, Operand(length, LSL, kSmiTagSize));
    783   LoadRoot(scratch2, map_index);
    784   str(scratch1, FieldMemOperand(string, String::kLengthOffset));
    785   mov(scratch1, Operand(String::kEmptyHashField));
    786   str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
    787   str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
    788 }
    789 
    790 
    791 int MacroAssembler::ActivationFrameAlignment() {
    792 #if defined(V8_HOST_ARCH_ARM)
    793   // Running on the real platform. Use the alignment as mandated by the local
    794   // environment.
    795   // Note: This will break if we ever start generating snapshots on one ARM
    796   // platform for another ARM platform with a different alignment.
    797   return OS::ActivationFrameAlignment();
    798 #else  // defined(V8_HOST_ARCH_ARM)
    799   // If we are using the simulator then we should always align to the expected
    800   // alignment. As the simulator is used to generate snapshots we do not know
    801   // if the target platform will need alignment, so this is controlled from a
    802   // flag.
    803   return FLAG_sim_stack_alignment;
    804 #endif  // defined(V8_HOST_ARCH_ARM)
    805 }
    806 
    807 
    808 void MacroAssembler::LeaveExitFrame(bool save_doubles,
    809                                     Register argument_count) {
    810   // Optionally restore all double registers.
    811   if (save_doubles) {
    812     // Calculate the stack location of the saved doubles and restore them.
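             // The doubles were pushed below the sp and code slots set up by
             // EnterExitFrame, hence the 2 * kPointerSize displacement from fp.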
    813     const int offset = 2 * kPointerSize;
    814     sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
    815     DwVfpRegister first = d0;
    816     DwVfpRegister last =
    817         DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    818     vldm(ia, r3, first, last);
    819   }
    820 
    821   // Clear top frame.
    822   mov(r3, Operand(0, RelocInfo::NONE));
    823   mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
    824   str(r3, MemOperand(ip));
    825 
    826   // Restore current context from top and clear it in debug mode.
    827   mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
    828   ldr(cp, MemOperand(ip));
    829 #ifdef DEBUG
    830   str(r3, MemOperand(ip));
    831 #endif
    832 
    833   // Tear down the exit frame, pop the arguments, and return.
    834   mov(sp, Operand(fp));
    835   ldm(ia_w, sp, fp.bit() | lr.bit());
    836   if (argument_count.is_valid()) {
    837     add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
    838   }
    839 }
    840 
    841 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
    842   vmov(dst, r0, r1);
    843 }
    844 
    845 
    846 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
    847                                     const ParameterCount& actual,
    848                                     Handle<Code> code_constant,
    849                                     Register code_reg,
    850                                     Label* done,
    851                                     InvokeFlag flag,
    852                                     CallWrapper* call_wrapper) {
    853   bool definitely_matches = false;
    854   Label regular_invoke;
    855 
     856   // Check whether the expected and actual argument counts match. If not,
     857   // set up registers according to the contract with ArgumentsAdaptorTrampoline:
    858   //  r0: actual arguments count
    859   //  r1: function (passed through to callee)
    860   //  r2: expected arguments count
    861   //  r3: callee code entry
    862 
    863   // The code below is made a lot easier because the calling code already sets
    864   // up actual and expected registers according to the contract if values are
    865   // passed in registers.
    866   ASSERT(actual.is_immediate() || actual.reg().is(r0));
    867   ASSERT(expected.is_immediate() || expected.reg().is(r2));
    868   ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
    869 
    870   if (expected.is_immediate()) {
    871     ASSERT(actual.is_immediate());
    872     if (expected.immediate() == actual.immediate()) {
    873       definitely_matches = true;
    874     } else {
    875       mov(r0, Operand(actual.immediate()));
    876       const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
    877       if (expected.immediate() == sentinel) {
    878         // Don't worry about adapting arguments for builtins that
     879         // don't want that done. Skip the adaptation code by making it look
    880         // like we have a match between expected and actual number of
    881         // arguments.
    882         definitely_matches = true;
    883       } else {
    884         mov(r2, Operand(expected.immediate()));
    885       }
    886     }
    887   } else {
    888     if (actual.is_immediate()) {
    889       cmp(expected.reg(), Operand(actual.immediate()));
    890       b(eq, &regular_invoke);
    891       mov(r0, Operand(actual.immediate()));
    892     } else {
    893       cmp(expected.reg(), Operand(actual.reg()));
    894       b(eq, &regular_invoke);
    895     }
    896   }
    897 
    898   if (!definitely_matches) {
    899     if (!code_constant.is_null()) {
    900       mov(r3, Operand(code_constant));
    901       add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    902     }
    903 
    904     Handle<Code> adaptor =
    905         isolate()->builtins()->ArgumentsAdaptorTrampoline();
    906     if (flag == CALL_FUNCTION) {
    907       if (call_wrapper != NULL) {
    908         call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
    909       }
    910       Call(adaptor, RelocInfo::CODE_TARGET);
    911       if (call_wrapper != NULL) call_wrapper->AfterCall();
    912       b(done);
    913     } else {
    914       Jump(adaptor, RelocInfo::CODE_TARGET);
    915     }
    916     bind(&regular_invoke);
    917   }
    918 }
    919 
    920 
    921 void MacroAssembler::InvokeCode(Register code,
    922                                 const ParameterCount& expected,
    923                                 const ParameterCount& actual,
    924                                 InvokeFlag flag,
    925                                 CallWrapper* call_wrapper) {
    926   Label done;
    927 
    928   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
    929                  call_wrapper);
    930   if (flag == CALL_FUNCTION) {
    931     if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
    932     Call(code);
    933     if (call_wrapper != NULL) call_wrapper->AfterCall();
    934   } else {
    935     ASSERT(flag == JUMP_FUNCTION);
    936     Jump(code);
    937   }
    938 
     939   // Continue here if InvokePrologue handled the invocation itself (via the
     940   // arguments adaptor) because the parameter counts did not match.
    941   bind(&done);
    942 }
    943 
    944 
    945 void MacroAssembler::InvokeCode(Handle<Code> code,
    946                                 const ParameterCount& expected,
    947                                 const ParameterCount& actual,
    948                                 RelocInfo::Mode rmode,
    949                                 InvokeFlag flag) {
    950   Label done;
    951 
    952   InvokePrologue(expected, actual, code, no_reg, &done, flag);
    953   if (flag == CALL_FUNCTION) {
    954     Call(code, rmode);
    955   } else {
    956     Jump(code, rmode);
    957   }
    958 
     959   // Continue here if InvokePrologue handled the invocation itself (via the
     960   // arguments adaptor) because the parameter counts did not match.
    961   bind(&done);
    962 }
    963 
    964 
    965 void MacroAssembler::InvokeFunction(Register fun,
    966                                     const ParameterCount& actual,
    967                                     InvokeFlag flag,
    968                                     CallWrapper* call_wrapper) {
    969   // Contract with called JS functions requires that function is passed in r1.
    970   ASSERT(fun.is(r1));
    971 
    972   Register expected_reg = r2;
    973   Register code_reg = r3;
    974 
    975   ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
    976   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
    977   ldr(expected_reg,
    978       FieldMemOperand(code_reg,
    979                       SharedFunctionInfo::kFormalParameterCountOffset));
    980   mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
    981   ldr(code_reg,
    982       FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    983 
    984   ParameterCount expected(expected_reg);
    985   InvokeCode(code_reg, expected, actual, flag, call_wrapper);
    986 }
    987 
    988 
    989 void MacroAssembler::InvokeFunction(JSFunction* function,
    990                                     const ParameterCount& actual,
    991                                     InvokeFlag flag) {
    992   ASSERT(function->is_compiled());
    993 
    994   // Get the function and setup the context.
    995   mov(r1, Operand(Handle<JSFunction>(function)));
    996   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
    997 
    998   // Invoke the cached code.
    999   Handle<Code> code(function->code());
   1000   ParameterCount expected(function->shared()->formal_parameter_count());
   1001   if (V8::UseCrankshaft()) {
   1002     // TODO(kasperl): For now, we always call indirectly through the
   1003     // code field in the function to allow recompilation to take effect
   1004     // without changing any of the call sites.
   1005     ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   1006     InvokeCode(r3, expected, actual, flag);
   1007   } else {
   1008     InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
   1009   }
   1010 }
   1011 
   1012 
   1013 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
   1014                                           Register map,
   1015                                           Register scratch,
   1016                                           Label* fail) {
   1017   ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
   1018   IsInstanceJSObjectType(map, scratch, fail);
   1019 }
   1020 
   1021 
   1022 void MacroAssembler::IsInstanceJSObjectType(Register map,
   1023                                             Register scratch,
   1024                                             Label* fail) {
   1025   ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
   1026   cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
   1027   b(lt, fail);
   1028   cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
   1029   b(gt, fail);
   1030 }
   1031 
   1032 
   1033 void MacroAssembler::IsObjectJSStringType(Register object,
   1034                                           Register scratch,
   1035                                           Label* fail) {
   1036   ASSERT(kNotStringTag != 0);
   1037 
   1038   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   1039   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1040   tst(scratch, Operand(kIsNotStringMask));
   1041   b(ne, fail);
   1042 }
   1043 
   1044 
   1045 #ifdef ENABLE_DEBUGGER_SUPPORT
   1046 void MacroAssembler::DebugBreak() {
   1047   ASSERT(allow_stub_calls());
   1048   mov(r0, Operand(0, RelocInfo::NONE));
   1049   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   1050   CEntryStub ces(1);
   1051   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
   1052 }
   1053 #endif
   1054 
   1055 
   1056 void MacroAssembler::PushTryHandler(CodeLocation try_location,
   1057                                     HandlerType type) {
   1058   // Adjust this code if not the case.
   1059   ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
   1060   // The pc (return address) is passed in register lr.
   1061   if (try_location == IN_JAVASCRIPT) {
   1062     if (type == TRY_CATCH_HANDLER) {
   1063       mov(r3, Operand(StackHandler::TRY_CATCH));
   1064     } else {
   1065       mov(r3, Operand(StackHandler::TRY_FINALLY));
   1066     }
   1067     ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
   1068            && StackHandlerConstants::kFPOffset == 2 * kPointerSize
   1069            && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   1070     stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
   1071     // Save the current handler as the next handler.
   1072     mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
   1073     ldr(r1, MemOperand(r3));
   1074     ASSERT(StackHandlerConstants::kNextOffset == 0);
   1075     push(r1);
   1076     // Link this handler as the new current one.
   1077     str(sp, MemOperand(r3));
   1078   } else {
   1079     // Must preserve r0-r4, r5-r7 are available.
   1080     ASSERT(try_location == IN_JS_ENTRY);
   1081     // The frame pointer does not point to a JS frame so we save NULL
   1082     // for fp. We expect the code throwing an exception to check fp
   1083     // before dereferencing it to restore the context.
   1084     mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
   1085     mov(r6, Operand(StackHandler::ENTRY));
   1086     ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
   1087            && StackHandlerConstants::kFPOffset == 2 * kPointerSize
   1088            && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   1089     stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
   1090     // Save the current handler as the next handler.
   1091     mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
   1092     ldr(r6, MemOperand(r7));
   1093     ASSERT(StackHandlerConstants::kNextOffset == 0);
   1094     push(r6);
   1095     // Link this handler as the new current one.
   1096     str(sp, MemOperand(r7));
   1097   }
   1098 }
   1099 
   1100 
   1101 void MacroAssembler::PopTryHandler() {
   1102   ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
   1103   pop(r1);
   1104   mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
   1105   add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
   1106   str(r1, MemOperand(ip));
   1107 }
   1108 
   1109 
   1110 void MacroAssembler::Throw(Register value) {
   1111   // r0 is expected to hold the exception.
   1112   if (!value.is(r0)) {
   1113     mov(r0, value);
   1114   }
   1115 
   1116   // Adjust this code if not the case.
   1117   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
   1118 
   1119   // Drop the sp to the top of the handler.
   1120   mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
   1121   ldr(sp, MemOperand(r3));
   1122 
   1123   // Restore the next handler and frame pointer, discard handler state.
   1124   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   1125   pop(r2);
   1126   str(r2, MemOperand(r3));
   1127   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
   1128   ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
   1129 
   1130   // Before returning we restore the context from the frame pointer if
   1131   // not NULL.  The frame pointer is NULL in the exception handler of a
   1132   // JS entry frame.
   1133   cmp(fp, Operand(0, RelocInfo::NONE));
   1134   // Set cp to NULL if fp is NULL.
   1135   mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
   1136   // Restore cp otherwise.
   1137   ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
   1138 #ifdef DEBUG
   1139   if (emit_debug_code()) {
   1140     mov(lr, Operand(pc));
   1141   }
   1142 #endif
   1143   STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   1144   pop(pc);
   1145 }
   1146 
   1147 
   1148 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
   1149                                       Register value) {
   1150   // Adjust this code if not the case.
   1151   STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
   1152 
   1153   // r0 is expected to hold the exception.
   1154   if (!value.is(r0)) {
   1155     mov(r0, value);
   1156   }
   1157 
   1158   // Drop sp to the top stack handler.
   1159   mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
   1160   ldr(sp, MemOperand(r3));
   1161 
   1162   // Unwind the handlers until the ENTRY handler is found.
   1163   Label loop, done;
   1164   bind(&loop);
   1165   // Load the type of the current stack handler.
   1166   const int kStateOffset = StackHandlerConstants::kStateOffset;
   1167   ldr(r2, MemOperand(sp, kStateOffset));
   1168   cmp(r2, Operand(StackHandler::ENTRY));
   1169   b(eq, &done);
   1170   // Fetch the next handler in the list.
   1171   const int kNextOffset = StackHandlerConstants::kNextOffset;
   1172   ldr(sp, MemOperand(sp, kNextOffset));
   1173   jmp(&loop);
   1174   bind(&done);
   1175 
    1176   // Set the top handler address to the next handler past the current ENTRY handler.
   1177   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   1178   pop(r2);
   1179   str(r2, MemOperand(r3));
   1180 
   1181   if (type == OUT_OF_MEMORY) {
   1182     // Set external caught exception to false.
   1183     ExternalReference external_caught(
   1184         Isolate::k_external_caught_exception_address, isolate());
   1185     mov(r0, Operand(false, RelocInfo::NONE));
   1186     mov(r2, Operand(external_caught));
   1187     str(r0, MemOperand(r2));
   1188 
   1189     // Set pending exception and r0 to out of memory exception.
   1190     Failure* out_of_memory = Failure::OutOfMemoryException();
   1191     mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
   1192     mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
   1193                                       isolate())));
   1194     str(r0, MemOperand(r2));
   1195   }
   1196 
   1197   // Stack layout at this point. See also StackHandlerConstants.
   1198   // sp ->   state (ENTRY)
   1199   //         fp
   1200   //         lr
   1201 
   1202   // Discard handler state (r2 is not used) and restore frame pointer.
   1203   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
   1204   ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
   1205   // Before returning we restore the context from the frame pointer if
   1206   // not NULL.  The frame pointer is NULL in the exception handler of a
   1207   // JS entry frame.
   1208   cmp(fp, Operand(0, RelocInfo::NONE));
   1209   // Set cp to NULL if fp is NULL.
   1210   mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
   1211   // Restore cp otherwise.
   1212   ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
   1213 #ifdef DEBUG
   1214   if (emit_debug_code()) {
   1215     mov(lr, Operand(pc));
   1216   }
   1217 #endif
   1218   STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   1219   pop(pc);
   1220 }
   1221 
   1222 
   1223 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   1224                                             Register scratch,
   1225                                             Label* miss) {
   1226   Label same_contexts;
   1227 
   1228   ASSERT(!holder_reg.is(scratch));
   1229   ASSERT(!holder_reg.is(ip));
   1230   ASSERT(!scratch.is(ip));
   1231 
   1232   // Load current lexical context from the stack frame.
   1233   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
   1234   // In debug mode, make sure the lexical context is set.
   1235 #ifdef DEBUG
   1236   cmp(scratch, Operand(0, RelocInfo::NONE));
   1237   Check(ne, "we should not have an empty lexical context");
   1238 #endif
   1239 
   1240   // Load the global context of the current context.
   1241   int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   1242   ldr(scratch, FieldMemOperand(scratch, offset));
   1243   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
   1244 
   1245   // Check the context is a global context.
   1246   if (emit_debug_code()) {
   1247     // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    1248     // Cannot use ip as a temporary in this verification code, because
    1249     // ip is clobbered as part of cmp with an object Operand.
   1250     push(holder_reg);  // Temporarily save holder on the stack.
   1251     // Read the first word and compare to the global_context_map.
   1252     ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
   1253     LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
   1254     cmp(holder_reg, ip);
   1255     Check(eq, "JSGlobalObject::global_context should be a global context.");
   1256     pop(holder_reg);  // Restore holder.
   1257   }
   1258 
   1259   // Check if both contexts are the same.
   1260   ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
   1261   cmp(scratch, Operand(ip));
   1262   b(eq, &same_contexts);
   1263 
   1264   // Check the context is a global context.
   1265   if (emit_debug_code()) {
   1266     // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    1267     // Cannot use ip as a temporary in this verification code, because
    1268     // ip is clobbered as part of cmp with an object Operand.
   1269     push(holder_reg);  // Temporarily save holder on the stack.
   1270     mov(holder_reg, ip);  // Move ip to its holding place.
   1271     LoadRoot(ip, Heap::kNullValueRootIndex);
   1272     cmp(holder_reg, ip);
   1273     Check(ne, "JSGlobalProxy::context() should not be null.");
   1274 
   1275     ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
   1276     LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
   1277     cmp(holder_reg, ip);
   1278     Check(eq, "JSGlobalObject::global_context should be a global context.");
    1279     // Restoring ip is not needed; ip is reloaded below.
   1280     pop(holder_reg);  // Restore holder.
   1281     // Restore ip to holder's context.
   1282     ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
   1283   }
   1284 
   1285   // Check that the security token in the calling global object is
   1286   // compatible with the security token in the receiving global
   1287   // object.
   1288   int token_offset = Context::kHeaderSize +
   1289                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
   1290 
   1291   ldr(scratch, FieldMemOperand(scratch, token_offset));
   1292   ldr(ip, FieldMemOperand(ip, token_offset));
   1293   cmp(scratch, Operand(ip));
   1294   b(ne, miss);
   1295 
   1296   bind(&same_contexts);
   1297 }
   1298 
   1299 
   1300 void MacroAssembler::AllocateInNewSpace(int object_size,
   1301                                         Register result,
   1302                                         Register scratch1,
   1303                                         Register scratch2,
   1304                                         Label* gc_required,
   1305                                         AllocationFlags flags) {
   1306   if (!FLAG_inline_new) {
   1307     if (emit_debug_code()) {
   1308       // Trash the registers to simulate an allocation failure.
   1309       mov(result, Operand(0x7091));
   1310       mov(scratch1, Operand(0x7191));
   1311       mov(scratch2, Operand(0x7291));
   1312     }
   1313     jmp(gc_required);
   1314     return;
   1315   }
   1316 
   1317   ASSERT(!result.is(scratch1));
   1318   ASSERT(!result.is(scratch2));
   1319   ASSERT(!scratch1.is(scratch2));
   1320   ASSERT(!scratch1.is(ip));
   1321   ASSERT(!scratch2.is(ip));
   1322 
   1323   // Make object size into bytes.
   1324   if ((flags & SIZE_IN_WORDS) != 0) {
   1325     object_size *= kPointerSize;
   1326   }
   1327   ASSERT_EQ(0, object_size & kObjectAlignmentMask);
   1328 
   1329   // Check relative positions of allocation top and limit addresses.
   1330   // The values must be adjacent in memory to allow the use of LDM.
   1331   // Also, assert that the registers are numbered such that the values
   1332   // are loaded in the correct order.
   1333   ExternalReference new_space_allocation_top =
   1334       ExternalReference::new_space_allocation_top_address(isolate());
   1335   ExternalReference new_space_allocation_limit =
   1336       ExternalReference::new_space_allocation_limit_address(isolate());
   1337   intptr_t top   =
   1338       reinterpret_cast<intptr_t>(new_space_allocation_top.address());
   1339   intptr_t limit =
   1340       reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
   1341   ASSERT((limit - top) == kPointerSize);
   1342   ASSERT(result.code() < ip.code());
   1343 
   1344   // Set up allocation top address and object size registers.
   1345   Register topaddr = scratch1;
   1346   Register obj_size_reg = scratch2;
   1347   mov(topaddr, Operand(new_space_allocation_top));
   1348   mov(obj_size_reg, Operand(object_size));
   1349 
   1350   // This code stores a temporary value in ip. This is OK, as the code below
   1351   // does not need ip for implicit literal generation.
   1352   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   1353     // Load allocation top into result and allocation limit into ip.
   1354     ldm(ia, topaddr, result.bit() | ip.bit());
   1355   } else {
   1356     if (emit_debug_code()) {
    1357       // Assert that result actually contains top on entry. ip is used
    1358       // immediately below, so this use of ip does not cause a difference in
    1359       // register content between debug and release mode.
   1360       ldr(ip, MemOperand(topaddr));
   1361       cmp(result, ip);
   1362       Check(eq, "Unexpected allocation top");
   1363     }
   1364     // Load allocation limit into ip. Result already contains allocation top.
   1365     ldr(ip, MemOperand(topaddr, limit - top));
   1366   }
   1367 
   1368   // Calculate new top and bail out if new space is exhausted. Use result
   1369   // to calculate the new top.
   1370   add(scratch2, result, Operand(obj_size_reg), SetCC);
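           // A carry here means the addition wrapped around the 32-bit address
           // space; treat that the same as running out of new space.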
   1371   b(cs, gc_required);
   1372   cmp(scratch2, Operand(ip));
   1373   b(hi, gc_required);
   1374   str(scratch2, MemOperand(topaddr));
   1375 
   1376   // Tag object if requested.
   1377   if ((flags & TAG_OBJECT) != 0) {
   1378     add(result, result, Operand(kHeapObjectTag));
   1379   }
   1380 }
   1381 
   1382 
   1383 void MacroAssembler::AllocateInNewSpace(Register object_size,
   1384                                         Register result,
   1385                                         Register scratch1,
   1386                                         Register scratch2,
   1387                                         Label* gc_required,
   1388                                         AllocationFlags flags) {
   1389   if (!FLAG_inline_new) {
   1390     if (emit_debug_code()) {
   1391       // Trash the registers to simulate an allocation failure.
   1392       mov(result, Operand(0x7091));
   1393       mov(scratch1, Operand(0x7191));
   1394       mov(scratch2, Operand(0x7291));
   1395     }
   1396     jmp(gc_required);
   1397     return;
   1398   }
   1399 
   1400   // Assert that the register arguments are different and that none of
   1401   // them are ip. ip is used explicitly in the code generated below.
   1402   ASSERT(!result.is(scratch1));
   1403   ASSERT(!result.is(scratch2));
   1404   ASSERT(!scratch1.is(scratch2));
   1405   ASSERT(!result.is(ip));
   1406   ASSERT(!scratch1.is(ip));
   1407   ASSERT(!scratch2.is(ip));
   1408 
   1409   // Check relative positions of allocation top and limit addresses.
   1410   // The values must be adjacent in memory to allow the use of LDM.
   1411   // Also, assert that the registers are numbered such that the values
   1412   // are loaded in the correct order.
   1413   ExternalReference new_space_allocation_top =
   1414       ExternalReference::new_space_allocation_top_address(isolate());
   1415   ExternalReference new_space_allocation_limit =
   1416       ExternalReference::new_space_allocation_limit_address(isolate());
   1417   intptr_t top =
   1418       reinterpret_cast<intptr_t>(new_space_allocation_top.address());
   1419   intptr_t limit =
   1420       reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
   1421   ASSERT((limit - top) == kPointerSize);
   1422   ASSERT(result.code() < ip.code());
   1423 
   1424   // Set up allocation top address.
   1425   Register topaddr = scratch1;
   1426   mov(topaddr, Operand(new_space_allocation_top));
   1427 
   1428   // This code stores a temporary value in ip. This is OK, as the code below
   1429   // does not need ip for implicit literal generation.
   1430   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   1431     // Load allocation top into result and allocation limit into ip.
   1432     ldm(ia, topaddr, result.bit() | ip.bit());
   1433   } else {
   1434     if (emit_debug_code()) {
   1435       // Assert that result actually contains top on entry. ip is used
    1436       // immediately below, so this use of ip does not cause a difference in
    1437       // register contents between debug and release mode.
   1438       ldr(ip, MemOperand(topaddr));
   1439       cmp(result, ip);
   1440       Check(eq, "Unexpected allocation top");
   1441     }
   1442     // Load allocation limit into ip. Result already contains allocation top.
   1443     ldr(ip, MemOperand(topaddr, limit - top));
   1444   }
   1445 
   1446   // Calculate new top and bail out if new space is exhausted. Use result
   1447   // to calculate the new top. Object size may be in words so a shift is
   1448   // required to get the number of bytes.
   1449   if ((flags & SIZE_IN_WORDS) != 0) {
   1450     add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
   1451   } else {
   1452     add(scratch2, result, Operand(object_size), SetCC);
   1453   }
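           // As above, carry set means the new top wrapped around the address space.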
   1454   b(cs, gc_required);
   1455   cmp(scratch2, Operand(ip));
   1456   b(hi, gc_required);
   1457 
   1458   // Update allocation top. result temporarily holds the new top.
   1459   if (emit_debug_code()) {
   1460     tst(scratch2, Operand(kObjectAlignmentMask));
   1461     Check(eq, "Unaligned allocation in new space");
   1462   }
   1463   str(scratch2, MemOperand(topaddr));
   1464 
   1465   // Tag object if requested.
   1466   if ((flags & TAG_OBJECT) != 0) {
   1467     add(result, result, Operand(kHeapObjectTag));
   1468   }
   1469 }
   1470 
   1471 
   1472 void MacroAssembler::UndoAllocationInNewSpace(Register object,
   1473                                               Register scratch) {
   1474   ExternalReference new_space_allocation_top =
   1475       ExternalReference::new_space_allocation_top_address(isolate());
   1476 
   1477   // Make sure the object has no tag before resetting top.
   1478   and_(object, object, Operand(~kHeapObjectTagMask));
   1479 #ifdef DEBUG
    1480   // Check that the object being un-allocated is below the current top.
   1481   mov(scratch, Operand(new_space_allocation_top));
   1482   ldr(scratch, MemOperand(scratch));
   1483   cmp(object, scratch);
   1484   Check(lt, "Undo allocation of non allocated memory");
   1485 #endif
   1486   // Write the address of the object to un-allocate as the current top.
   1487   mov(scratch, Operand(new_space_allocation_top));
   1488   str(object, MemOperand(scratch));
   1489 }
   1490 
   1491 
   1492 void MacroAssembler::AllocateTwoByteString(Register result,
   1493                                            Register length,
   1494                                            Register scratch1,
   1495                                            Register scratch2,
   1496                                            Register scratch3,
   1497                                            Label* gc_required) {
   1498   // Calculate the number of bytes needed for the characters in the string while
   1499   // observing object alignment.
   1500   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   1501   mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
   1502   add(scratch1, scratch1,
   1503       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
   1504   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
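           // scratch1 = (length * 2 + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
           //            & ~kObjectAlignmentMask,
           // i.e. the header plus character data, rounded up to the allocation granularity.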
   1505 
   1506   // Allocate two-byte string in new space.
   1507   AllocateInNewSpace(scratch1,
   1508                      result,
   1509                      scratch2,
   1510                      scratch3,
   1511                      gc_required,
   1512                      TAG_OBJECT);
   1513 
   1514   // Set the map, length and hash field.
   1515   InitializeNewString(result,
   1516                       length,
   1517                       Heap::kStringMapRootIndex,
   1518                       scratch1,
   1519                       scratch2);
   1520 }
   1521 
   1522 
   1523 void MacroAssembler::AllocateAsciiString(Register result,
   1524                                          Register length,
   1525                                          Register scratch1,
   1526                                          Register scratch2,
   1527                                          Register scratch3,
   1528                                          Label* gc_required) {
   1529   // Calculate the number of bytes needed for the characters in the string while
   1530   // observing object alignment.
   1531   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   1532   ASSERT(kCharSize == 1);
   1533   add(scratch1, length,
   1534       Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
   1535   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
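           // Same rounding as for two-byte strings above, with one byte per character.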
   1536 
   1537   // Allocate ASCII string in new space.
   1538   AllocateInNewSpace(scratch1,
   1539                      result,
   1540                      scratch2,
   1541                      scratch3,
   1542                      gc_required,
   1543                      TAG_OBJECT);
   1544 
   1545   // Set the map, length and hash field.
   1546   InitializeNewString(result,
   1547                       length,
   1548                       Heap::kAsciiStringMapRootIndex,
   1549                       scratch1,
   1550                       scratch2);
   1551 }
   1552 
   1553 
   1554 void MacroAssembler::AllocateTwoByteConsString(Register result,
   1555                                                Register length,
   1556                                                Register scratch1,
   1557                                                Register scratch2,
   1558                                                Label* gc_required) {
   1559   AllocateInNewSpace(ConsString::kSize,
   1560                      result,
   1561                      scratch1,
   1562                      scratch2,
   1563                      gc_required,
   1564                      TAG_OBJECT);
   1565 
   1566   InitializeNewString(result,
   1567                       length,
   1568                       Heap::kConsStringMapRootIndex,
   1569                       scratch1,
   1570                       scratch2);
   1571 }
   1572 
   1573 
   1574 void MacroAssembler::AllocateAsciiConsString(Register result,
   1575                                              Register length,
   1576                                              Register scratch1,
   1577                                              Register scratch2,
   1578                                              Label* gc_required) {
   1579   AllocateInNewSpace(ConsString::kSize,
   1580                      result,
   1581                      scratch1,
   1582                      scratch2,
   1583                      gc_required,
   1584                      TAG_OBJECT);
   1585 
   1586   InitializeNewString(result,
   1587                       length,
   1588                       Heap::kConsAsciiStringMapRootIndex,
   1589                       scratch1,
   1590                       scratch2);
   1591 }
   1592 
   1593 
   1594 void MacroAssembler::CompareObjectType(Register object,
   1595                                        Register map,
   1596                                        Register type_reg,
   1597                                        InstanceType type) {
   1598   ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   1599   CompareInstanceType(map, type_reg, type);
   1600 }
   1601 
   1602 
   1603 void MacroAssembler::CompareInstanceType(Register map,
   1604                                          Register type_reg,
   1605                                          InstanceType type) {
   1606   ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
   1607   cmp(type_reg, Operand(type));
   1608 }
   1609 
   1610 
   1611 void MacroAssembler::CompareRoot(Register obj,
   1612                                  Heap::RootListIndex index) {
   1613   ASSERT(!obj.is(ip));
   1614   LoadRoot(ip, index);
   1615   cmp(obj, ip);
   1616 }
   1617 
   1618 
   1619 void MacroAssembler::CheckMap(Register obj,
   1620                               Register scratch,
   1621                               Handle<Map> map,
   1622                               Label* fail,
   1623                               bool is_heap_object) {
   1624   if (!is_heap_object) {
   1625     JumpIfSmi(obj, fail);
   1626   }
   1627   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   1628   mov(ip, Operand(map));
   1629   cmp(scratch, ip);
   1630   b(ne, fail);
   1631 }
   1632 
   1633 
   1634 void MacroAssembler::CheckMap(Register obj,
   1635                               Register scratch,
   1636                               Heap::RootListIndex index,
   1637                               Label* fail,
   1638                               bool is_heap_object) {
   1639   if (!is_heap_object) {
   1640     JumpIfSmi(obj, fail);
   1641   }
   1642   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   1643   LoadRoot(ip, index);
   1644   cmp(scratch, ip);
   1645   b(ne, fail);
   1646 }
   1647 
   1648 
   1649 void MacroAssembler::TryGetFunctionPrototype(Register function,
   1650                                              Register result,
   1651                                              Register scratch,
   1652                                              Label* miss) {
   1653   // Check that the receiver isn't a smi.
   1654   JumpIfSmi(function, miss);
   1655 
   1656   // Check that the function really is a function.  Load map into result reg.
   1657   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
   1658   b(ne, miss);
   1659 
   1660   // Make sure that the function has an instance prototype.
   1661   Label non_instance;
   1662   ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
   1663   tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
   1664   b(ne, &non_instance);
   1665 
   1666   // Get the prototype or initial map from the function.
   1667   ldr(result,
   1668       FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   1669 
   1670   // If the prototype or initial map is the hole, don't return it and
   1671   // simply miss the cache instead. This will allow us to allocate a
   1672   // prototype object on-demand in the runtime system.
   1673   LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   1674   cmp(result, ip);
   1675   b(eq, miss);
   1676 
   1677   // If the function does not have an initial map, we're done.
   1678   Label done;
   1679   CompareObjectType(result, scratch, scratch, MAP_TYPE);
   1680   b(ne, &done);
   1681 
   1682   // Get the prototype from the initial map.
   1683   ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   1684   jmp(&done);
   1685 
   1686   // Non-instance prototype: Fetch prototype from constructor field
   1687   // in initial map.
   1688   bind(&non_instance);
   1689   ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
   1690 
   1691   // All done.
   1692   bind(&done);
   1693 }
   1694 
   1695 
   1696 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
   1697   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   1698   Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
   1699 }
   1700 
   1701 
   1702 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
   1703   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   1704   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
   1705 }
   1706 
   1707 
   1708 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
   1709   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   1710   Object* result;
   1711   { MaybeObject* maybe_result = stub->TryGetCode();
   1712     if (!maybe_result->ToObject(&result)) return maybe_result;
   1713   }
   1714   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
   1715   return result;
   1716 }
   1717 
   1718 
   1719 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   1720   return ref0.address() - ref1.address();
   1721 }
   1722 
   1723 
   1724 MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
   1725     ExternalReference function, int stack_space) {
   1726   ExternalReference next_address =
   1727       ExternalReference::handle_scope_next_address();
   1728   const int kNextOffset = 0;
   1729   const int kLimitOffset = AddressOffset(
   1730       ExternalReference::handle_scope_limit_address(),
   1731       next_address);
   1732   const int kLevelOffset = AddressOffset(
   1733       ExternalReference::handle_scope_level_address(),
   1734       next_address);
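           // The limit and level fields are addressed as fixed offsets from
           // next_address, so a single base register (r7 below) reaches all three.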
   1735 
   1736   // Allocate HandleScope in callee-save registers.
   1737   mov(r7, Operand(next_address));
   1738   ldr(r4, MemOperand(r7, kNextOffset));
   1739   ldr(r5, MemOperand(r7, kLimitOffset));
   1740   ldr(r6, MemOperand(r7, kLevelOffset));
   1741   add(r6, r6, Operand(1));
   1742   str(r6, MemOperand(r7, kLevelOffset));
   1743 
   1744   // Native call returns to the DirectCEntry stub which redirects to the
   1745   // return address pushed on stack (could have moved after GC).
   1746   // DirectCEntry stub itself is generated early and never moves.
   1747   DirectCEntryStub stub;
   1748   stub.GenerateCall(this, function);
   1749 
   1750   Label promote_scheduled_exception;
   1751   Label delete_allocated_handles;
   1752   Label leave_exit_frame;
   1753 
   1754   // If result is non-zero, dereference to get the result value
   1755   // otherwise set it to undefined.
   1756   cmp(r0, Operand(0));
   1757   LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
   1758   ldr(r0, MemOperand(r0), ne);
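           // The two instructions above are predicated on eq/ne from the cmp, so no
           // branch is needed to choose between the result handle and undefined.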
   1759 
   1760   // No more valid handles (the result handle was the last one). Restore
   1761   // previous handle scope.
   1762   str(r4, MemOperand(r7, kNextOffset));
   1763   if (emit_debug_code()) {
   1764     ldr(r1, MemOperand(r7, kLevelOffset));
   1765     cmp(r1, r6);
   1766     Check(eq, "Unexpected level after return from api call");
   1767   }
   1768   sub(r6, r6, Operand(1));
   1769   str(r6, MemOperand(r7, kLevelOffset));
   1770   ldr(ip, MemOperand(r7, kLimitOffset));
   1771   cmp(r5, ip);
   1772   b(ne, &delete_allocated_handles);
   1773 
   1774   // Check if the function scheduled an exception.
   1775   bind(&leave_exit_frame);
   1776   LoadRoot(r4, Heap::kTheHoleValueRootIndex);
   1777   mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
   1778   ldr(r5, MemOperand(ip));
   1779   cmp(r4, r5);
   1780   b(ne, &promote_scheduled_exception);
   1781 
   1782   // LeaveExitFrame expects unwind space to be in a register.
   1783   mov(r4, Operand(stack_space));
   1784   LeaveExitFrame(false, r4);
   1785   mov(pc, lr);
   1786 
   1787   bind(&promote_scheduled_exception);
   1788   MaybeObject* result
   1789       = TryTailCallExternalReference(
   1790           ExternalReference(Runtime::kPromoteScheduledException, isolate()),
   1791           0,
   1792           1);
   1793   if (result->IsFailure()) {
   1794     return result;
   1795   }
   1796 
   1797   // HandleScope limit has changed. Delete allocated extensions.
   1798   bind(&delete_allocated_handles);
   1799   str(r5, MemOperand(r7, kLimitOffset));
   1800   mov(r4, r0);
   1801   PrepareCallCFunction(1, r5);
   1802   mov(r0, Operand(ExternalReference::isolate_address()));
   1803   CallCFunction(
   1804       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   1805   mov(r0, r4);
   1806   jmp(&leave_exit_frame);
   1807 
   1808   return result;
   1809 }
   1810 
   1811 
   1812 void MacroAssembler::IllegalOperation(int num_arguments) {
   1813   if (num_arguments > 0) {
   1814     add(sp, sp, Operand(num_arguments * kPointerSize));
   1815   }
   1816   LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   1817 }
   1818 
   1819 
   1820 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   1821   // If the hash field contains an array index pick it out. The assert checks
   1822   // that the constants for the maximum number of digits for an array index
   1823   // cached in the hash field and the number of bits reserved for it does not
   1824   // conflict.
   1825   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
   1826          (1 << String::kArrayIndexValueBits));
   1827   // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
   1828   // the low kHashShift bits.
   1829   STATIC_ASSERT(kSmiTag == 0);
   1830   Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
   1831   mov(index, Operand(hash, LSL, kSmiTagSize));
   1832 }
   1833 
   1834 
   1835 void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
   1836                                                        Register outHighReg,
   1837                                                        Register outLowReg) {
   1838   // ARMv7 VFP3 instructions to implement integer to double conversion.
   1839   mov(r7, Operand(inReg, ASR, kSmiTagSize));
   1840   vmov(s15, r7);
   1841   vcvt_f64_s32(d7, s15);
   1842   vmov(outLowReg, outHighReg, d7);
   1843 }
   1844 
   1845 
   1846 void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
   1847                                                DwVfpRegister result,
   1848                                                Register scratch1,
   1849                                                Register scratch2,
   1850                                                Register heap_number_map,
   1851                                                SwVfpRegister scratch3,
   1852                                                Label* not_number,
   1853                                                ObjectToDoubleFlags flags) {
   1854   Label done;
   1855   if ((flags & OBJECT_NOT_SMI) == 0) {
   1856     Label not_smi;
   1857     JumpIfNotSmi(object, &not_smi);
   1858     // Remove smi tag and convert to double.
   1859     mov(scratch1, Operand(object, ASR, kSmiTagSize));
   1860     vmov(scratch3, scratch1);
   1861     vcvt_f64_s32(result, scratch3);
   1862     b(&done);
   1863     bind(&not_smi);
   1864   }
   1865   // Check for heap number and load double value from it.
   1866   ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
   1867   sub(scratch2, object, Operand(kHeapObjectTag));
   1868   cmp(scratch1, heap_number_map);
   1869   b(ne, not_number);
   1870   if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
   1871     // If exponent is all ones the number is either a NaN or +/-Infinity.
   1872     ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
   1873     Sbfx(scratch1,
   1874          scratch1,
   1875          HeapNumber::kExponentShift,
   1876          HeapNumber::kExponentBits);
    1877     // An all-ones value sign-extends to -1.
   1878     cmp(scratch1, Operand(-1));
   1879     b(eq, not_number);
   1880   }
   1881   vldr(result, scratch2, HeapNumber::kValueOffset);
   1882   bind(&done);
   1883 }
   1884 
   1885 
   1886 void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
   1887                                             DwVfpRegister value,
   1888                                             Register scratch1,
   1889                                             SwVfpRegister scratch2) {
   1890   mov(scratch1, Operand(smi, ASR, kSmiTagSize));
   1891   vmov(scratch2, scratch1);
   1892   vcvt_f64_s32(value, scratch2);
   1893 }
   1894 
   1895 
    1896 // Tries to get a signed int32 out of a double-precision floating-point heap
    1897 // number. Rounds towards 0. Branches to 'not_int32' if the double is out of
    1898 // the signed 32-bit integer range.
   1899 void MacroAssembler::ConvertToInt32(Register source,
   1900                                     Register dest,
   1901                                     Register scratch,
   1902                                     Register scratch2,
   1903                                     DwVfpRegister double_scratch,
   1904                                     Label *not_int32) {
   1905   if (CpuFeatures::IsSupported(VFP3)) {
   1906     CpuFeatures::Scope scope(VFP3);
   1907     sub(scratch, source, Operand(kHeapObjectTag));
   1908     vldr(double_scratch, scratch, HeapNumber::kValueOffset);
   1909     vcvt_s32_f64(double_scratch.low(), double_scratch);
   1910     vmov(dest, double_scratch.low());
    1911     // The signed vcvt instruction saturates to the minimum (0x80000000) or
    1912     // maximum (0x7fffffff) signed 32-bit integer when the double is out of
    1913     // range. After subtracting one, the minimum signed integer becomes the
    1914     // maximum signed integer.
   1915     sub(scratch, dest, Operand(1));
   1916     cmp(scratch, Operand(LONG_MAX - 1));
   1917     // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
   1918     b(ge, not_int32);
   1919   } else {
   1920     // This code is faster for doubles that are in the ranges -0x7fffffff to
    1921     // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds roughly to
   1922     // the range of signed int32 values that are not Smis.  Jumps to the label
   1923     // 'not_int32' if the double isn't in the range -0x80000000.0 to
   1924     // 0x80000000.0 (excluding the endpoints).
   1925     Label right_exponent, done;
   1926     // Get exponent word.
   1927     ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
   1928     // Get exponent alone in scratch2.
   1929     Ubfx(scratch2,
   1930          scratch,
   1931          HeapNumber::kExponentShift,
   1932          HeapNumber::kExponentBits);
   1933     // Load dest with zero.  We use this either for the final shift or
   1934     // for the answer.
   1935     mov(dest, Operand(0, RelocInfo::NONE));
    1936     // Check whether the exponent matches a 32-bit signed int that is not a Smi.
    1937     // A non-Smi integer is 1.xxx * 2^30, so the unbiased exponent is 30. This is
   1938     // the exponent that we are fastest at and also the highest exponent we can
   1939     // handle here.
   1940     const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
   1941     // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
   1942     // split it up to avoid a constant pool entry.  You can't do that in general
   1943     // for cmp because of the overflow flag, but we know the exponent is in the
   1944     // range 0-2047 so there is no overflow.
   1945     int fudge_factor = 0x400;
   1946     sub(scratch2, scratch2, Operand(fudge_factor));
   1947     cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
   1948     // If we have a match of the int32-but-not-Smi exponent then skip some
   1949     // logic.
   1950     b(eq, &right_exponent);
   1951     // If the exponent is higher than that then go to slow case.  This catches
   1952     // numbers that don't fit in a signed int32, infinities and NaNs.
   1953     b(gt, not_int32);
   1954 
   1955     // We know the exponent is smaller than 30 (biased).  If it is less than
   1956     // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
   1957     // it rounds to zero.
   1958     const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
   1959     sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
   1960     // Dest already has a Smi zero.
   1961     b(lt, &done);
   1962 
   1963     // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
   1964     // get how much to shift down.
   1965     rsb(dest, scratch2, Operand(30));
   1966 
   1967     bind(&right_exponent);
   1968     // Get the top bits of the mantissa.
   1969     and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
   1970     // Put back the implicit 1.
   1971     orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
   1972     // Shift up the mantissa bits to take up the space the exponent used to
   1973     // take. We just orred in the implicit bit so that took care of one and
   1974     // we want to leave the sign bit 0 so we subtract 2 bits from the shift
   1975     // distance.
   1976     const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
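             // kNonMantissaBitsInTopWord covers the sign and exponent bits (12 in
             // total), so shift_distance is 10.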
   1977     mov(scratch2, Operand(scratch2, LSL, shift_distance));
   1978     // Put sign in zero flag.
   1979     tst(scratch, Operand(HeapNumber::kSignMask));
   1980     // Get the second half of the double. For some exponents we don't
   1981     // actually need this because the bits get shifted out again, but
   1982     // it's probably slower to test than just to do it.
   1983     ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    1984     // Shift the low word down 22 bits to keep only its top 10 bits.
   1985     orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
   1986     // Move down according to the exponent.
   1987     mov(dest, Operand(scratch, LSR, dest));
   1988     // Fix sign if sign bit was set.
   1989     rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
   1990     bind(&done);
   1991   }
   1992 }
   1993 
   1994 
   1995 void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
   1996                                      SwVfpRegister result,
   1997                                      DwVfpRegister double_input,
   1998                                      Register scratch1,
   1999                                      Register scratch2,
   2000                                      CheckForInexactConversion check_inexact) {
   2001   ASSERT(CpuFeatures::IsSupported(VFP3));
   2002   CpuFeatures::Scope scope(VFP3);
   2003   Register prev_fpscr = scratch1;
   2004   Register scratch = scratch2;
   2005 
   2006   int32_t check_inexact_conversion =
   2007     (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
   2008 
    2009   // Set a custom FPSCR:
   2010   //  - Set rounding mode.
   2011   //  - Clear vfp cumulative exception flags.
   2012   //  - Make sure Flush-to-zero mode control bit is unset.
   2013   vmrs(prev_fpscr);
   2014   bic(scratch,
   2015       prev_fpscr,
   2016       Operand(kVFPExceptionMask |
   2017               check_inexact_conversion |
   2018               kVFPRoundingModeMask |
   2019               kVFPFlushToZeroMask));
   2020   // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
   2021   if (rounding_mode != kRoundToNearest) {
   2022     orr(scratch, scratch, Operand(rounding_mode));
   2023   }
   2024   vmsr(scratch);
   2025 
   2026   // Convert the argument to an integer.
   2027   vcvt_s32_f64(result,
   2028                double_input,
   2029                (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
   2030                                                : kFPSCRRounding);
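           // vcvt_s32_f64 encodes round-to-zero directly in the instruction; for any
           // other mode it uses the rounding bits just written to the FPSCR.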
   2031 
   2032   // Retrieve FPSCR.
   2033   vmrs(scratch);
   2034   // Restore FPSCR.
   2035   vmsr(prev_fpscr);
   2036   // Check for vfp exceptions.
   2037   tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
   2038 }
   2039 
   2040 
   2041 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
   2042                                                  Register input_high,
   2043                                                  Register input_low,
   2044                                                  Register scratch) {
   2045   Label done, normal_exponent, restore_sign;
   2046 
   2047   // Extract the biased exponent in result.
   2048   Ubfx(result,
   2049        input_high,
   2050        HeapNumber::kExponentShift,
   2051        HeapNumber::kExponentBits);
   2052 
   2053   // Check for Infinity and NaNs, which should return 0.
   2054   cmp(result, Operand(HeapNumber::kExponentMask));
   2055   mov(result, Operand(0), LeaveCC, eq);
   2056   b(eq, &done);
   2057 
   2058   // Express exponent as delta to (number of mantissa bits + 31).
   2059   sub(result,
   2060       result,
   2061       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
   2062       SetCC);
   2063 
   2064   // If the delta is strictly positive, all bits would be shifted away,
   2065   // which means that we can return 0.
   2066   b(le, &normal_exponent);
   2067   mov(result, Operand(0));
   2068   b(&done);
   2069 
   2070   bind(&normal_exponent);
   2071   const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
   2072   // Calculate shift.
   2073   add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
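           // scratch now holds the unbiased exponent minus kMantissaBitsInTopWord
           // (20): the left shift that puts the implicit mantissa bit at its final
           // position in the 32-bit result.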
   2074 
   2075   // Save the sign.
   2076   Register sign = result;
   2077   result = no_reg;
   2078   and_(sign, input_high, Operand(HeapNumber::kSignMask));
   2079 
   2080   // Set the implicit 1 before the mantissa part in input_high.
   2081   orr(input_high,
   2082       input_high,
   2083       Operand(1 << HeapNumber::kMantissaBitsInTopWord));
   2084   // Shift the mantissa bits to the correct position.
   2085   // We don't need to clear non-mantissa bits as they will be shifted away.
    2086   // If they weren't, it would mean that the answer is in the 32-bit range.
   2087   mov(input_high, Operand(input_high, LSL, scratch));
   2088 
   2089   // Replace the shifted bits with bits from the lower mantissa word.
   2090   Label pos_shift, shift_done;
   2091   rsb(scratch, scratch, Operand(32), SetCC);
   2092   b(&pos_shift, ge);
   2093 
   2094   // Negate scratch.
   2095   rsb(scratch, scratch, Operand(0));
   2096   mov(input_low, Operand(input_low, LSL, scratch));
   2097   b(&shift_done);
   2098 
   2099   bind(&pos_shift);
   2100   mov(input_low, Operand(input_low, LSR, scratch));
   2101 
   2102   bind(&shift_done);
   2103   orr(input_high, input_high, Operand(input_low));
   2104   // Restore sign if necessary.
   2105   cmp(sign, Operand(0));
   2106   result = sign;
   2107   sign = no_reg;
   2108   rsb(result, input_high, Operand(0), LeaveCC, ne);
   2109   mov(result, input_high, LeaveCC, eq);
   2110   bind(&done);
   2111 }
   2112 
   2113 
   2114 void MacroAssembler::EmitECMATruncate(Register result,
   2115                                       DwVfpRegister double_input,
   2116                                       SwVfpRegister single_scratch,
   2117                                       Register scratch,
   2118                                       Register input_high,
   2119                                       Register input_low) {
   2120   CpuFeatures::Scope scope(VFP3);
   2121   ASSERT(!input_high.is(result));
   2122   ASSERT(!input_low.is(result));
   2123   ASSERT(!input_low.is(input_high));
   2124   ASSERT(!scratch.is(result) &&
   2125          !scratch.is(input_high) &&
   2126          !scratch.is(input_low));
   2127   ASSERT(!single_scratch.is(double_input.low()) &&
   2128          !single_scratch.is(double_input.high()));
   2129 
   2130   Label done;
   2131 
   2132   // Clear cumulative exception flags.
   2133   ClearFPSCRBits(kVFPExceptionMask, scratch);
   2134   // Try a conversion to a signed integer.
   2135   vcvt_s32_f64(single_scratch, double_input);
   2136   vmov(result, single_scratch);
    2137   // Retrieve the FPSCR.
   2138   vmrs(scratch);
   2139   // Check for overflow and NaNs.
   2140   tst(scratch, Operand(kVFPOverflowExceptionBit |
   2141                        kVFPUnderflowExceptionBit |
   2142                        kVFPInvalidOpExceptionBit));
   2143   // If we had no exceptions we are done.
   2144   b(eq, &done);
   2145 
   2146   // Load the double value and perform a manual truncation.
   2147   vmov(input_low, input_high, double_input);
   2148   EmitOutOfInt32RangeTruncate(result,
   2149                               input_high,
   2150                               input_low,
   2151                               scratch);
   2152   bind(&done);
   2153 }
   2154 
   2155 
   2156 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
   2157                                          Register src,
   2158                                          int num_least_bits) {
   2159   if (CpuFeatures::IsSupported(ARMv7)) {
   2160     ubfx(dst, src, kSmiTagSize, num_least_bits);
   2161   } else {
   2162     mov(dst, Operand(src, ASR, kSmiTagSize));
   2163     and_(dst, dst, Operand((1 << num_least_bits) - 1));
   2164   }
   2165 }
   2166 
   2167 
   2168 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
   2169                                            Register src,
   2170                                            int num_least_bits) {
   2171   and_(dst, src, Operand((1 << num_least_bits) - 1));
   2172 }
   2173 
   2174 
   2175 void MacroAssembler::CallRuntime(const Runtime::Function* f,
   2176                                  int num_arguments) {
   2177   // All parameters are on the stack.  r0 has the return value after call.
   2178 
   2179   // If the expected number of arguments of the runtime function is
   2180   // constant, we check that the actual number of arguments match the
   2181   // expectation.
   2182   if (f->nargs >= 0 && f->nargs != num_arguments) {
   2183     IllegalOperation(num_arguments);
   2184     return;
   2185   }
   2186 
   2187   // TODO(1236192): Most runtime routines don't need the number of
   2188   // arguments passed in because it is constant. At some point we
   2189   // should remove this need and make the runtime routine entry code
   2190   // smarter.
   2191   mov(r0, Operand(num_arguments));
   2192   mov(r1, Operand(ExternalReference(f, isolate())));
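           // CEntryStub expects the argument count in r0 and the runtime function's
           // entry address in r1.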
   2193   CEntryStub stub(1);
   2194   CallStub(&stub);
   2195 }
   2196 
   2197 
   2198 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
   2199   CallRuntime(Runtime::FunctionForId(fid), num_arguments);
   2200 }
   2201 
   2202 
   2203 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   2204   const Runtime::Function* function = Runtime::FunctionForId(id);
   2205   mov(r0, Operand(function->nargs));
   2206   mov(r1, Operand(ExternalReference(function, isolate())));
   2207   CEntryStub stub(1);
   2208   stub.SaveDoubles();
   2209   CallStub(&stub);
   2210 }
   2211 
   2212 
   2213 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
   2214                                            int num_arguments) {
   2215   mov(r0, Operand(num_arguments));
   2216   mov(r1, Operand(ext));
   2217 
   2218   CEntryStub stub(1);
   2219   CallStub(&stub);
   2220 }
   2221 
   2222 
   2223 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
   2224                                                int num_arguments,
   2225                                                int result_size) {
   2226   // TODO(1236192): Most runtime routines don't need the number of
   2227   // arguments passed in because it is constant. At some point we
   2228   // should remove this need and make the runtime routine entry code
   2229   // smarter.
   2230   mov(r0, Operand(num_arguments));
   2231   JumpToExternalReference(ext);
   2232 }
   2233 
   2234 
   2235 MaybeObject* MacroAssembler::TryTailCallExternalReference(
   2236     const ExternalReference& ext, int num_arguments, int result_size) {
   2237   // TODO(1236192): Most runtime routines don't need the number of
   2238   // arguments passed in because it is constant. At some point we
   2239   // should remove this need and make the runtime routine entry code
   2240   // smarter.
   2241   mov(r0, Operand(num_arguments));
   2242   return TryJumpToExternalReference(ext);
   2243 }
   2244 
   2245 
   2246 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
   2247                                      int num_arguments,
   2248                                      int result_size) {
   2249   TailCallExternalReference(ExternalReference(fid, isolate()),
   2250                             num_arguments,
   2251                             result_size);
   2252 }
   2253 
   2254 
   2255 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
   2256 #if defined(__thumb__)
   2257   // Thumb mode builtin.
   2258   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
   2259 #endif
   2260   mov(r1, Operand(builtin));
   2261   CEntryStub stub(1);
   2262   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   2263 }
   2264 
   2265 
   2266 MaybeObject* MacroAssembler::TryJumpToExternalReference(
   2267     const ExternalReference& builtin) {
   2268 #if defined(__thumb__)
   2269   // Thumb mode builtin.
   2270   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
   2271 #endif
   2272   mov(r1, Operand(builtin));
   2273   CEntryStub stub(1);
   2274   return TryTailCallStub(&stub);
   2275 }
   2276 
   2277 
   2278 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
   2279                                    InvokeJSFlags flags,
   2280                                    CallWrapper* call_wrapper) {
   2281   GetBuiltinEntry(r2, id);
   2282   if (flags == CALL_JS) {
   2283     if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
   2284     Call(r2);
   2285     if (call_wrapper != NULL) call_wrapper->AfterCall();
   2286   } else {
   2287     ASSERT(flags == JUMP_JS);
   2288     Jump(r2);
   2289   }
   2290 }
   2291 
   2292 
   2293 void MacroAssembler::GetBuiltinFunction(Register target,
   2294                                         Builtins::JavaScript id) {
   2295   // Load the builtins object into target register.
   2296   ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   2297   ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
   2298   // Load the JavaScript builtin function from the builtins object.
   2299   ldr(target, FieldMemOperand(target,
   2300                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
   2301 }
   2302 
   2303 
   2304 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   2305   ASSERT(!target.is(r1));
   2306   GetBuiltinFunction(r1, id);
   2307   // Load the code entry point from the builtins object.
   2308   ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   2309 }
   2310 
   2311 
   2312 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
   2313                                 Register scratch1, Register scratch2) {
   2314   if (FLAG_native_code_counters && counter->Enabled()) {
   2315     mov(scratch1, Operand(value));
   2316     mov(scratch2, Operand(ExternalReference(counter)));
   2317     str(scratch1, MemOperand(scratch2));
   2318   }
   2319 }
   2320 
   2321 
   2322 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
   2323                                       Register scratch1, Register scratch2) {
   2324   ASSERT(value > 0);
   2325   if (FLAG_native_code_counters && counter->Enabled()) {
   2326     mov(scratch2, Operand(ExternalReference(counter)));
   2327     ldr(scratch1, MemOperand(scratch2));
   2328     add(scratch1, scratch1, Operand(value));
   2329     str(scratch1, MemOperand(scratch2));
   2330   }
   2331 }
   2332 
   2333 
   2334 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
   2335                                       Register scratch1, Register scratch2) {
   2336   ASSERT(value > 0);
   2337   if (FLAG_native_code_counters && counter->Enabled()) {
   2338     mov(scratch2, Operand(ExternalReference(counter)));
   2339     ldr(scratch1, MemOperand(scratch2));
   2340     sub(scratch1, scratch1, Operand(value));
   2341     str(scratch1, MemOperand(scratch2));
   2342   }
   2343 }
   2344 
   2345 
   2346 void MacroAssembler::Assert(Condition cond, const char* msg) {
   2347   if (emit_debug_code())
   2348     Check(cond, msg);
   2349 }
   2350 
   2351 
   2352 void MacroAssembler::AssertRegisterIsRoot(Register reg,
   2353                                           Heap::RootListIndex index) {
   2354   if (emit_debug_code()) {
   2355     LoadRoot(ip, index);
   2356     cmp(reg, ip);
   2357     Check(eq, "Register did not match expected root");
   2358   }
   2359 }
   2360 
   2361 
   2362 void MacroAssembler::AssertFastElements(Register elements) {
   2363   if (emit_debug_code()) {
   2364     ASSERT(!elements.is(ip));
   2365     Label ok;
   2366     push(elements);
   2367     ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
   2368     LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   2369     cmp(elements, ip);
   2370     b(eq, &ok);
   2371     LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
   2372     cmp(elements, ip);
   2373     b(eq, &ok);
   2374     Abort("JSObject with fast elements map has slow elements");
   2375     bind(&ok);
   2376     pop(elements);
   2377   }
   2378 }
   2379 
   2380 
   2381 void MacroAssembler::Check(Condition cond, const char* msg) {
   2382   Label L;
   2383   b(cond, &L);
   2384   Abort(msg);
   2385   // will not return here
   2386   bind(&L);
   2387 }
   2388 
   2389 
   2390 void MacroAssembler::Abort(const char* msg) {
   2391   Label abort_start;
   2392   bind(&abort_start);
   2393   // We want to pass the msg string like a smi to avoid GC
   2394   // problems, however msg is not guaranteed to be aligned
   2395   // properly. Instead, we pass an aligned pointer that is
   2396   // a proper v8 smi, but also pass the alignment difference
   2397   // from the real pointer as a smi.
   2398   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   2399   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
   2400   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
   2401 #ifdef DEBUG
   2402   if (msg != NULL) {
   2403     RecordComment("Abort message: ");
   2404     RecordComment(msg);
   2405   }
   2406 #endif
   2407   // Disable stub call restrictions to always allow calls to abort.
   2408   AllowStubCallsScope allow_scope(this, true);
   2409 
   2410   mov(r0, Operand(p0));
   2411   push(r0);
   2412   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   2413   push(r0);
   2414   CallRuntime(Runtime::kAbort, 2);
   2415   // will not return here
   2416   if (is_const_pool_blocked()) {
   2417     // If the calling code cares about the exact number of
   2418     // instructions generated, we insert padding here to keep the size
   2419     // of the Abort macro constant.
   2420     static const int kExpectedAbortInstructions = 10;
   2421     int abort_instructions = InstructionsGeneratedSince(&abort_start);
   2422     ASSERT(abort_instructions <= kExpectedAbortInstructions);
   2423     while (abort_instructions++ < kExpectedAbortInstructions) {
   2424       nop();
   2425     }
   2426   }
   2427 }
   2428 
   2429 
   2430 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   2431   if (context_chain_length > 0) {
   2432     // Move up the chain of contexts to the context containing the slot.
   2433     ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
   2434     // Load the function context (which is the incoming, outer context).
   2435     ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
   2436     for (int i = 1; i < context_chain_length; i++) {
   2437       ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
   2438       ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
   2439     }
   2440   } else {
   2441     // Slot is in the current function context.  Move it into the
   2442     // destination register in case we store into it (the write barrier
   2443     // cannot be allowed to destroy the context in esi).
   2444     mov(dst, cp);
   2445   }
   2446 
   2447   // We should not have found a 'with' context by walking the context chain
   2448   // (i.e., the static scope chain and runtime context chain do not agree).
   2449   // A variable occurring in such a scope should have slot type LOOKUP and
   2450   // not CONTEXT.
   2451   if (emit_debug_code()) {
   2452     ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   2453     cmp(dst, ip);
   2454     Check(eq, "Yo dawg, I heard you liked function contexts "
   2455               "so I put function contexts in all your contexts");
   2456   }
   2457 }
   2458 
   2459 
   2460 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   2461   // Load the global or builtins object from the current context.
   2462   ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   2463   // Load the global context from the global or builtins object.
   2464   ldr(function, FieldMemOperand(function,
   2465                                 GlobalObject::kGlobalContextOffset));
   2466   // Load the function from the global context.
   2467   ldr(function, MemOperand(function, Context::SlotOffset(index)));
   2468 }
   2469 
   2470 
   2471 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   2472                                                   Register map,
   2473                                                   Register scratch) {
   2474   // Load the initial map. The global functions all have initial maps.
   2475   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2476   if (emit_debug_code()) {
   2477     Label ok, fail;
   2478     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
   2479     b(&ok);
   2480     bind(&fail);
   2481     Abort("Global functions must have initial map");
   2482     bind(&ok);
   2483   }
   2484 }
   2485 
   2486 
   2487 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
   2488     Register reg,
   2489     Register scratch,
   2490     Label* not_power_of_two_or_zero) {
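           // reg & (reg - 1) clears the lowest set bit, so it is zero exactly when
           // reg is a power of two; the mi branch catches zero, whose decrement is
           // negative.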
   2491   sub(scratch, reg, Operand(1), SetCC);
   2492   b(mi, not_power_of_two_or_zero);
   2493   tst(scratch, reg);
   2494   b(ne, not_power_of_two_or_zero);
   2495 }
   2496 
   2497 
   2498 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
   2499     Register reg,
   2500     Register scratch,
   2501     Label* zero_and_neg,
   2502     Label* not_power_of_two) {
   2503   sub(scratch, reg, Operand(1), SetCC);
   2504   b(mi, zero_and_neg);
   2505   tst(scratch, reg);
   2506   b(ne, not_power_of_two);
   2507 }
   2508 
   2509 
   2510 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
   2511                                       Register reg2,
   2512                                       Label* on_not_both_smi) {
   2513   STATIC_ASSERT(kSmiTag == 0);
   2514   tst(reg1, Operand(kSmiTagMask));
   2515   tst(reg2, Operand(kSmiTagMask), eq);
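           // The second tst is predicated on eq, so after the pair ne means at least
           // one of the registers is not a smi.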
   2516   b(ne, on_not_both_smi);
   2517 }
   2518 
   2519 
   2520 void MacroAssembler::JumpIfEitherSmi(Register reg1,
   2521                                      Register reg2,
   2522                                      Label* on_either_smi) {
   2523   STATIC_ASSERT(kSmiTag == 0);
   2524   tst(reg1, Operand(kSmiTagMask));
   2525   tst(reg2, Operand(kSmiTagMask), ne);
   2526   b(eq, on_either_smi);
   2527 }
   2528 
   2529 
   2530 void MacroAssembler::AbortIfSmi(Register object) {
   2531   STATIC_ASSERT(kSmiTag == 0);
   2532   tst(object, Operand(kSmiTagMask));
   2533   Assert(ne, "Operand is a smi");
   2534 }
   2535 
   2536 
   2537 void MacroAssembler::AbortIfNotSmi(Register object) {
   2538   STATIC_ASSERT(kSmiTag == 0);
   2539   tst(object, Operand(kSmiTagMask));
   2540   Assert(eq, "Operand is not smi");
   2541 }
   2542 
   2543 
   2544 void MacroAssembler::AbortIfNotString(Register object) {
   2545   STATIC_ASSERT(kSmiTag == 0);
   2546   tst(object, Operand(kSmiTagMask));
   2547   Assert(ne, "Operand is not a string");
   2548   push(object);
   2549   ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
   2550   CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
   2551   pop(object);
   2552   Assert(lo, "Operand is not a string");
   2553 }
   2554 
   2555 
   2556 
   2557 void MacroAssembler::AbortIfNotRootValue(Register src,
   2558                                          Heap::RootListIndex root_value_index,
   2559                                          const char* message) {
   2560   CompareRoot(src, root_value_index);
   2561   Assert(eq, message);
   2562 }
   2563 
   2564 
   2565 void MacroAssembler::JumpIfNotHeapNumber(Register object,
   2566                                          Register heap_number_map,
   2567                                          Register scratch,
   2568                                          Label* on_not_heap_number) {
   2569   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   2570   AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   2571   cmp(scratch, heap_number_map);
   2572   b(ne, on_not_heap_number);
   2573 }
   2574 
   2575 
   2576 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
   2577     Register first,
   2578     Register second,
   2579     Register scratch1,
   2580     Register scratch2,
   2581     Label* failure) {
   2582   // Test that both first and second are sequential ASCII strings.
   2583   // Assume that they are non-smis.
   2584   ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   2585   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   2586   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   2587   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
   2588 
   2589   JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
   2590                                                scratch2,
   2591                                                scratch1,
   2592                                                scratch2,
   2593                                                failure);
   2594 }
   2595 
   2596 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
   2597                                                          Register second,
   2598                                                          Register scratch1,
   2599                                                          Register scratch2,
   2600                                                          Label* failure) {
   2601   // Check that neither is a smi.
   2602   STATIC_ASSERT(kSmiTag == 0);
   2603   and_(scratch1, first, Operand(second));
   2604   tst(scratch1, Operand(kSmiTagMask));
   2605   b(eq, failure);
   2606   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
   2607                                              second,
   2608                                              scratch1,
   2609                                              scratch2,
   2610                                              failure);
   2611 }
   2612 
   2613 
   2614 // Allocates a heap number or jumps to the need_gc label if the young space
   2615 // is full and a scavenge is needed.
   2616 void MacroAssembler::AllocateHeapNumber(Register result,
   2617                                         Register scratch1,
   2618                                         Register scratch2,
   2619                                         Register heap_number_map,
   2620                                         Label* gc_required) {
   2621   // Allocate an object in the heap for the heap number and tag it as a heap
   2622   // object.
   2623   AllocateInNewSpace(HeapNumber::kSize,
   2624                      result,
   2625                      scratch1,
   2626                      scratch2,
   2627                      gc_required,
   2628                      TAG_OBJECT);
   2629 
   2630   // Store heap number map in the allocated object.
   2631   AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   2632   str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
   2633 }
   2634 
   2635 
   2636 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
   2637                                                  DwVfpRegister value,
   2638                                                  Register scratch1,
   2639                                                  Register scratch2,
   2640                                                  Register heap_number_map,
   2641                                                  Label* gc_required) {
   2642   AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
   2643   sub(scratch1, result, Operand(kHeapObjectTag));
   2644   vstr(value, scratch1, HeapNumber::kValueOffset);
   2645 }
   2646 
   2647 
   2648 // Copies a fixed number of fields of heap objects from src to dst.
   2649 void MacroAssembler::CopyFields(Register dst,
   2650                                 Register src,
   2651                                 RegList temps,
   2652                                 int field_count) {
   2653   // At least one bit set in the first 15 registers.
   2654   ASSERT((temps & ((1 << 15) - 1)) != 0);
   2655   ASSERT((temps & dst.bit()) == 0);
   2656   ASSERT((temps & src.bit()) == 0);
   2657   // Primitive implementation using only one temporary register.
   2658 
   2659   Register tmp = no_reg;
   2660   // Find a temp register in temps list.
   2661   for (int i = 0; i < 15; i++) {
   2662     if ((temps & (1 << i)) != 0) {
   2663       tmp.set_code(i);
   2664       break;
   2665     }
   2666   }
   2667   ASSERT(!tmp.is(no_reg));
   2668 
   2669   for (int i = 0; i < field_count; i++) {
   2670     ldr(tmp, FieldMemOperand(src, i * kPointerSize));
   2671     str(tmp, FieldMemOperand(dst, i * kPointerSize));
   2672   }
   2673 }
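
// Illustrative usage sketch (not part of the original source; registers and
// the field count are arbitrary): copy the first three pointer-size fields of
// the object in r0 to the object in r1, using r3 as the only temporary.
//   CopyFields(r1, r0, r3.bit(), 3);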
   2674 
   2675 
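// Copies length bytes from src to dst.  Clobbers src, dst, length and scratch;
// on return src and dst point just past the copied region and length is zero.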
   2676 void MacroAssembler::CopyBytes(Register src,
   2677                                Register dst,
   2678                                Register length,
   2679                                Register scratch) {
   2680   Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
   2681 
   2682   // Align src before copying in word size chunks.
   2683   bind(&align_loop);
   2684   cmp(length, Operand(0));
   2685   b(eq, &done);
   2686   bind(&align_loop_1);
   2687   tst(src, Operand(kPointerSize - 1));
   2688   b(eq, &word_loop);
   2689   ldrb(scratch, MemOperand(src, 1, PostIndex));
   2690   strb(scratch, MemOperand(dst, 1, PostIndex));
   2691   sub(length, length, Operand(1), SetCC);
   2692   b(ne, &byte_loop_1);
   2693 
   2694   // Copy bytes in word size chunks.
   2695   bind(&word_loop);
   2696   if (emit_debug_code()) {
   2697     tst(src, Operand(kPointerSize - 1));
   2698     Assert(eq, "Expecting alignment for CopyBytes");
   2699   }
   2700   cmp(length, Operand(kPointerSize));
   2701   b(lt, &byte_loop);
   2702   ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
   2703 #if CAN_USE_UNALIGNED_ACCESSES
   2704   str(scratch, MemOperand(dst, kPointerSize, PostIndex));
   2705 #else
   2706   strb(scratch, MemOperand(dst, 1, PostIndex));
   2707   mov(scratch, Operand(scratch, LSR, 8));
   2708   strb(scratch, MemOperand(dst, 1, PostIndex));
   2709   mov(scratch, Operand(scratch, LSR, 8));
   2710   strb(scratch, MemOperand(dst, 1, PostIndex));
   2711   mov(scratch, Operand(scratch, LSR, 8));
   2712   strb(scratch, MemOperand(dst, 1, PostIndex));
   2713 #endif
   2714   sub(length, length, Operand(kPointerSize));
   2715   b(&word_loop);
   2716 
   2717   // Copy the last bytes if any left.
   2718   bind(&byte_loop);
   2719   cmp(length, Operand(0));
   2720   b(eq, &done);
   2721   bind(&byte_loop_1);
   2722   ldrb(scratch, MemOperand(src, 1, PostIndex));
   2723   strb(scratch, MemOperand(dst, 1, PostIndex));
   2724   sub(length, length, Operand(1), SetCC);
   2725   b(ne, &byte_loop_1);
   2726   bind(&done);
   2727 }
   2728 
   2729 
   2730 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
   2731                                        Register source,  // Input.
   2732                                        Register scratch) {
   2733   ASSERT(!zeros.is(source) || !source.is(scratch));
   2734   ASSERT(!zeros.is(scratch));
   2735   ASSERT(!scratch.is(ip));
   2736   ASSERT(!source.is(ip));
   2737   ASSERT(!zeros.is(ip));
   2738 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
    2739   clz(zeros, source);  // This instruction is only supported on ARMv5 and later.
   2740 #else
   2741   mov(zeros, Operand(0, RelocInfo::NONE));
   2742   Move(scratch, source);
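  // Binary search for the most significant set bit: each step tests the top
  // half of the bits still under consideration and, if they are all clear
  // (eq), counts them and shifts them out.  For example, source == 0x00000100
  // accumulates 16 + 4 + 2 + 1 == 23, matching what clz would return.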
   2743   // Top 16.
   2744   tst(scratch, Operand(0xffff0000));
   2745   add(zeros, zeros, Operand(16), LeaveCC, eq);
   2746   mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
   2747   // Top 8.
   2748   tst(scratch, Operand(0xff000000));
   2749   add(zeros, zeros, Operand(8), LeaveCC, eq);
   2750   mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
   2751   // Top 4.
   2752   tst(scratch, Operand(0xf0000000));
   2753   add(zeros, zeros, Operand(4), LeaveCC, eq);
   2754   mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
   2755   // Top 2.
   2756   tst(scratch, Operand(0xc0000000));
   2757   add(zeros, zeros, Operand(2), LeaveCC, eq);
   2758   mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
   2759   // Top bit.
   2760   tst(scratch, Operand(0x80000000u));
   2761   add(zeros, zeros, Operand(1), LeaveCC, eq);
   2762 #endif
   2763 }
   2764 
   2765 
   2766 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
   2767     Register first,
   2768     Register second,
   2769     Register scratch1,
   2770     Register scratch2,
   2771     Label* failure) {
   2772   int kFlatAsciiStringMask =
   2773       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   2774   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
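  // An instance type passes only if masking it with the bits above leaves
  // exactly the sequential ASCII string tag, i.e. the value is a string that
  // is both sequential and ASCII encoded.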
   2775   and_(scratch1, first, Operand(kFlatAsciiStringMask));
   2776   and_(scratch2, second, Operand(kFlatAsciiStringMask));
   2777   cmp(scratch1, Operand(kFlatAsciiStringTag));
   2778   // Ignore second test if first test failed.
   2779   cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
   2780   b(ne, failure);
   2781 }
   2782 
   2783 
   2784 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
   2785                                                             Register scratch,
   2786                                                             Label* failure) {
   2787   int kFlatAsciiStringMask =
   2788       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   2789   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
   2790   and_(scratch, type, Operand(kFlatAsciiStringMask));
   2791   cmp(scratch, Operand(kFlatAsciiStringTag));
   2792   b(ne, failure);
   2793 }
   2794 
   2795 static const int kRegisterPassedArguments = 4;
   2796 
   2797 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   2798   int frame_alignment = ActivationFrameAlignment();
   2799 
   2800   // Up to four simple arguments are passed in registers r0..r3.
   2801   int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
   2802                                0 : num_arguments - kRegisterPassedArguments;
   2803   if (frame_alignment > kPointerSize) {
    2804     // Align the stack pointer and make room for the stack-passed arguments,
    2805     // if any, plus one word for the original value of sp.
   2806     mov(scratch, sp);
   2807     sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
   2808     ASSERT(IsPowerOf2(frame_alignment));
   2809     and_(sp, sp, Operand(-frame_alignment));
   2810     str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
   2811   } else {
   2812     sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   2813   }
   2814 }
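
// Illustrative calling sequence (not part of the original source; the external
// reference and the argument values are hypothetical):
//   PrepareCallCFunction(2, r2);
//   mov(r0, Operand(first_arg));
//   mov(r1, Operand(second_arg));
//   CallCFunction(ExternalReference::some_c_function(isolate()), 2);
// CallCFunction restores sp to its value from before PrepareCallCFunction.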
   2815 
   2816 
   2817 void MacroAssembler::CallCFunction(ExternalReference function,
   2818                                    int num_arguments) {
   2819   CallCFunctionHelper(no_reg, function, ip, num_arguments);
   2820 }
   2821 
   2822 void MacroAssembler::CallCFunction(Register function,
   2823                                    Register scratch,
   2824                                    int num_arguments) {
   2825   CallCFunctionHelper(function,
   2826                       ExternalReference::the_hole_value_location(isolate()),
   2827                       scratch,
   2828                       num_arguments);
   2829 }
   2830 
   2831 
   2832 void MacroAssembler::CallCFunctionHelper(Register function,
   2833                                          ExternalReference function_reference,
   2834                                          Register scratch,
   2835                                          int num_arguments) {
   2836   // Make sure that the stack is aligned before calling a C function unless
   2837   // running in the simulator. The simulator has its own alignment check which
   2838   // provides more information.
   2839 #if defined(V8_HOST_ARCH_ARM)
   2840   if (emit_debug_code()) {
   2841     int frame_alignment = OS::ActivationFrameAlignment();
   2842     int frame_alignment_mask = frame_alignment - 1;
   2843     if (frame_alignment > kPointerSize) {
   2844       ASSERT(IsPowerOf2(frame_alignment));
   2845       Label alignment_as_expected;
   2846       tst(sp, Operand(frame_alignment_mask));
   2847       b(eq, &alignment_as_expected);
    2848       // Don't use Check here, as it will call Runtime_Abort and could
    2849       // re-enter this code.
   2850       stop("Unexpected alignment");
   2851       bind(&alignment_as_expected);
   2852     }
   2853   }
   2854 #endif
   2855 
   2856   // Just call directly. The function called cannot cause a GC, or
   2857   // allow preemption, so the return address in the link register
   2858   // stays correct.
   2859   if (function.is(no_reg)) {
   2860     mov(scratch, Operand(function_reference));
   2861     function = scratch;
   2862   }
   2863   Call(function);
   2864   int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
   2865                                0 : num_arguments - kRegisterPassedArguments;
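  // Undo the stack setup from PrepareCallCFunction: either reload the original
  // sp that was saved above the stack-passed arguments, or simply pop the
  // argument slots.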
   2866   if (OS::ActivationFrameAlignment() > kPointerSize) {
   2867     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   2868   } else {
    2869     add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   2870   }
   2871 }
   2872 
   2873 
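// Computes, from the address of a "ldr <reg>, [pc, #offset]" instruction, the
// address of the constant pool entry it loads: the offset is relative to the
// pc, which on ARM reads as the address of the instruction plus 8 bytes.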
    2874 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
    2875                                                Register result) {
   2876   const uint32_t kLdrOffsetMask = (1 << 12) - 1;
   2877   const int32_t kPCRegOffset = 2 * kPointerSize;
   2878   ldr(result, MemOperand(ldr_location));
   2879   if (emit_debug_code()) {
    2880     // Check that the instruction is a ldr reg, [pc + offset].
   2881     and_(result, result, Operand(kLdrPCPattern));
   2882     cmp(result, Operand(kLdrPCPattern));
   2883     Check(eq, "The instruction to patch should be a load from pc.");
   2884     // Result was clobbered. Restore it.
   2885     ldr(result, MemOperand(ldr_location));
   2886   }
   2887   // Get the address of the constant.
   2888   and_(result, result, Operand(kLdrOffsetMask));
   2889   add(result, ldr_location, Operand(result));
   2890   add(result, result, Operand(kPCRegOffset));
   2891 }
   2892 
   2893 
   2894 CodePatcher::CodePatcher(byte* address, int instructions)
   2895     : address_(address),
   2896       instructions_(instructions),
   2897       size_(instructions * Assembler::kInstrSize),
   2898       masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   2899   // Create a new macro assembler pointing to the address of the code to patch.
    2900   // The size is adjusted with kGap in order for the assembler to generate size
   2901   // bytes of instructions without failing with buffer size constraints.
   2902   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   2903 }
   2904 
   2905 
   2906 CodePatcher::~CodePatcher() {
   2907   // Indicate that code has changed.
   2908   CPU::FlushICache(address_, size_);
   2909 
   2910   // Check that the code was patched as expected.
   2911   ASSERT(masm_.pc_ == address_ + size_);
   2912   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   2913 }
   2914 
   2915 
   2916 void CodePatcher::Emit(Instr instr) {
   2917   masm()->emit(instr);
   2918 }
   2919 
   2920 
   2921 void CodePatcher::Emit(Address addr) {
   2922   masm()->emit(reinterpret_cast<Instr>(addr));
   2923 }
   2924 
   2925 
   2926 void CodePatcher::EmitCondition(Condition cond) {
   2927   Instr instr = Assembler::instr_at(masm_.pc_);
   2928   instr = (instr & ~kCondMask) | cond;
   2929   masm_.emit(instr);
   2930 }
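
// Illustrative usage sketch (not part of the original source): overwrite the
// first of two instructions with a nop and rewrite the condition field of the
// second; the destructor then flushes the instruction cache for the range.
//   CodePatcher patcher(patch_address, 2);
//   patcher.masm()->nop();
//   patcher.EmitCondition(ne);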
   2931 
   2932 
   2933 } }  // namespace v8::internal
   2934 
   2935 #endif  // V8_TARGET_ARCH_ARM
   2936