// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}


// We always generate ARM code, never Thumb code, even if V8 is compiled to
// Thumb, so we require inter-working support.
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif


// We do not support Thumb inter-working with an ARM architecture that does
// not support the blx instruction (below v5t).  If you know what CPU you are
// compiling for you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif


// Using blx may yield better code, so use it when required or when available.
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif

// Using bx does not yield better code, so use it only when required.
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif


void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode), LeaveCC, cond);
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code.
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Register target, Condition cond) {
#if USE_BLX
  blx(target, cond);
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(target, rmode), LeaveCC, cond);
  // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
  // 'blx ip'; however, the code would not be shorter than the above sequence
  // and the target address of the call would be referenced by the first
  // instruction rather than the second one, which would make it harder to
  // patch (two instructions before the return address, instead of one).
  ASSERT(kCallTargetAddressOffset == kInstrSize);
}
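
// For reference, the sequence emitted above is (sketch; in ARM state, reading
// pc yields the address of the current instruction plus 8):
//   A+0: mov lr, pc            ; lr = A+8, the return address
//   A+4: ldr pc, [pc, #imm]    ; load the call target from the constant pool
//   A+8: <return address>
// The constant-pool reference thus sits kInstrSize bytes before the return
// address, which is what the ASSERT above checks.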


void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code.
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Ret(Condition cond) {
#if USE_BX
  bx(lr, cond);
#else
  mov(pc, Operand(lr), LeaveCC, cond);
#endif
}


void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  LoadRoot(ip, Heap::kStackLimitRootIndex);
  cmp(sp, Operand(ip));
  b(lo, on_stack_overflow);
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
  // Empty the const pool.
  CheckConstPool(true, true);
  add(pc, pc, Operand(index,
                      LSL,
                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
  nop();  // Jump table alignment.
  for (int i = 0; i < targets.length(); i++) {
    b(targets[i]);
  }
}
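
// Layout of the emitted table (sketch): with the add at address A, reading pc
// yields A+8, so a smi index of 0 dispatches to the first branch:
//   A+0:  add pc, pc, index << (kInstrSizeLog2 - kSmiTagSize)
//   A+4:  nop                ; jump table alignment
//   A+8:  b targets[0]
//   A+12: b targets[1]
//   ...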


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
}


// Will clobber 4 registers: object, offset, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));

  // This is how much we shift the remembered set bit offset to get the
  // offset of the word in the remembered set.  We divide by kBitsPerInt
  // (32, shift right 5) and then multiply by kIntSize (4, shift left 2),
  // for a net shift right by 3.
  const int kRSetWordShift = 3;

  Label fast, done;

  // First, test that the object is not in the new space.  We cannot set
  // remembered set bits in the new space.
  // object: heap object pointer (with tag)
  // offset: offset to store location from the object
  and_(scratch, object, Operand(ExternalReference::new_space_mask()));
  cmp(scratch, Operand(ExternalReference::new_space_start()));
  b(eq, &done);

  // Compute the bit offset in the remembered set.
  // object: heap object pointer (with tag)
  // offset: offset to store location from the object
  mov(ip, Operand(Page::kPageAlignmentMask));  // Load the mask only once.
  and_(scratch, object, Operand(ip));  // Offset into the page of the object.
  add(offset, scratch, Operand(offset));  // Add the offset into the object.
  mov(offset, Operand(offset, LSR, kObjectAlignmentBits));

  // Compute the page address from the heap object pointer.
  // object: heap object pointer (with tag)
  // offset: bit offset of store position in the remembered set
  bic(object, object, Operand(ip));

  // If the bit offset lies beyond the normal remembered set range, it is in
  // the extra remembered set area of a large object.
  // object: page start
  // offset: bit offset of store position in the remembered set
  cmp(offset, Operand(Page::kPageSize / kPointerSize));
  b(lt, &fast);

  // Adjust the bit offset to be relative to the start of the extra
  // remembered set and the start address to be the address of the extra
  // remembered set.
  sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
  // Load the array length into 'scratch' and multiply by four to get the
  // size in bytes of the elements.
  ldr(scratch, MemOperand(object, Page::kObjectStartOffset
                                  + FixedArray::kLengthOffset));
  mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
  // Add the page header (including remembered set), array header, and array
  // body size to the page address.
  add(object, object, Operand(Page::kObjectStartOffset
                              + FixedArray::kHeaderSize));
  add(object, object, Operand(scratch));

  bind(&fast);
  // Get the address of the rset word.
  // object: start of the remembered set (page start for the fast case)
  // offset: bit offset of store position in the remembered set
  bic(scratch, offset, Operand(kBitsPerInt - 1));  // Clear the bit offset.
  add(object, object, Operand(scratch, LSR, kRSetWordShift));
  // Get the bit offset in the rset word.
  // object: address of remembered set word
  // offset: bit offset of store position
  and_(offset, offset, Operand(kBitsPerInt - 1));

  ldr(scratch, MemOperand(object));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, offset));
  str(scratch, MemOperand(object));

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    mov(object, Operand(bit_cast<int32_t>(kZapValue)));
    mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
    mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
  }
}
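
// Worked example for the rset arithmetic above (hypothetical numbers): a
// store at bit offset 70 lands in rset word 70 / kBitsPerInt = 2, i.e. at
// byte offset (70 & ~31) >> kRSetWordShift = 8, with bit position
// 70 & 31 = 6 inside that word, which is exactly what the bic/add/and_
// sequence computes.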


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // r0-r3: preserved
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}
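
// Resulting frame layout (sketch, assuming 4-byte pointers, higher addresses
// first):
//   fp + 4:  saved lr (return address)
//   fp + 0:  saved fp             <- fp
//   fp - 4:  saved cp
//   fp - 8:  frame type (smi)
//   fp - 12: code object          <- sp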


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}


void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
  // Compute the argv pointer and keep it in a callee-saved register.
  // r0 is argc.
  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
  sub(r6, r6, Operand(kPointerSize));

  // Compute the callee's stack pointer before making changes and save it in
  // the ip register so that it is restored as the sp register on exit,
  // thereby popping the args.

  // ip = sp + kPointerSize * #args;
  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));

  // Align the stack at this point.  After this point we have 5 pushes,
  // so in fact we have to unalign here!  See also the assert on the
  // alignment in AlignStack.
  AlignStack(1);

  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
  mov(fp, Operand(sp));  // Set up the new frame pointer.

  mov(ip, Operand(CodeObject()));
  push(ip);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  str(cp, MemOperand(ip));

  // Set up argc and the builtin function in callee-saved registers.
  mov(r4, Operand(r0));
  mov(r5, Operand(r1));

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Save the state of all registers to the stack from the memory
  // location. This is needed to allow nested break points.
  if (mode == ExitFrame::MODE_DEBUG) {
    // Use sp as base to push.
    CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
  }
#endif
}


void MacroAssembler::AlignStack(int offset) {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  int activation_frame_alignment = OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so we will always align at
  // this point here.
  int activation_frame_alignment = 2 * kPointerSize;
#endif  // defined(V8_HOST_ARCH_ARM)
  if (activation_frame_alignment != kPointerSize) {
    // This code needs to be made more general if this assert doesn't hold.
    ASSERT(activation_frame_alignment == 2 * kPointerSize);
    mov(r7, Operand(Smi::FromInt(0)));
    tst(sp, Operand(activation_frame_alignment - offset));
    push(r7, eq);  // Conditional push instruction.
  }
}


void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Restore the memory copy of the registers by digging them out from
  // the stack. This is needed to allow nested break points.
  if (mode == ExitFrame::MODE_DEBUG) {
    // This code intentionally clobbers r2 and r3.
    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
    const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
    add(r3, fp, Operand(kOffset));
    CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
  }
#endif

  // Clear top frame.
  mov(r3, Operand(0));
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  str(r3, MemOperand(ip));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, Operand(fp));  // Respect ABI stack constraint.
  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual argument counts match. If not,
  // set up registers according to the contract with
  // ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip the adaptation code by making it
        // look like we have a match between the expected and actual
        // number of arguments.
        definitely_matches = true;
      } else {
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      b(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(code);
  }

  // Continue here if InvokePrologue handled the invocation itself (via the
  // arguments adaptor) because of mismatched parameter counts.
  bind(&done);
}
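
// A minimal usage sketch (hypothetical call site): invoking code whose entry
// point is already in r3 (the register InvokePrologue requires) with a
// statically known argument count:
//   ParameterCount expected(1);
//   ParameterCount actual(1);
//   masm->InvokeCode(r3, expected, actual, CALL_FUNCTION);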


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code, rmode);
  } else {
    Jump(code, rmode);
  }

  // Continue here if InvokePrologue handled the invocation itself (via the
  // arguments adaptor) because of mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // The contract with called JS functions requires that the function is
  // passed in r1.
  ASSERT(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  ldr(code_reg,
      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
  add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag);
}


void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  ASSERT(function->is_compiled());

  // Get the function and set up the context.
  mov(r1, Operand(Handle<JSFunction>(function)));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Invoke the cached code.
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
}

#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the contents of the registers to the memory locations.
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      Register reg = { r };
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      str(reg, MemOperand(ip));
    }
  }
}


void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the contents of the memory locations to the registers.
  for (int i = kNumJSCallerSaved; --i >= 0;) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      Register reg = { r };
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      ldr(reg, MemOperand(ip));
    }
  }
}


void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
                                                    RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the contents of the memory locations to the stack and adjust base.
  for (int i = kNumJSCallerSaved; --i >= 0;) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      ldr(ip, MemOperand(ip));
      str(ip, MemOperand(base, 4, NegPreIndex));
    }
  }
}


void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
                                                    Register scratch,
                                                    RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the contents of the stack to the memory locations and adjust base.
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      ldr(scratch, MemOperand(base, 4, PostIndex));
      str(scratch, MemOperand(ip));
    }
  }
}


void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  mov(r0, Operand(0));
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif


void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
  // The pc (return address) is passed in register lr.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      mov(r3, Operand(StackHandler::TRY_CATCH));
    } else {
      mov(r3, Operand(StackHandler::TRY_FINALLY));
    }
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r3, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r1, MemOperand(r3));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r1);
    // Link this handler as the new current one.
    str(sp, MemOperand(r3));
  } else {
    // Must preserve r0-r4; r5-r7 are available.
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame, so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    mov(ip, Operand(0));  // To save a NULL frame pointer.
    mov(r6, Operand(StackHandler::ENTRY));
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r7, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r6, MemOperand(r7));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r6);
    // Link this handler as the new current one.
    str(sp, MemOperand(r7));
  }
}
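
// Resulting handler frame (sketch; the offsets match the StackHandlerConstants
// ASSERTs above):
//   sp + 12: pc (saved lr)
//   sp +  8: fp (NULL for JS entry frames)
//   sp +  4: state
//   sp +  0: next handler         <- sp, also stored at Top::k_handler_address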


void MacroAssembler::PopTryHandler() {
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  pop(r1);
  mov(ip, Operand(ExternalReference(Top::k_handler_address)));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}


Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                   JSObject* holder, Register holder_reg,
                                   Register scratch,
                                   Label* miss) {
  // Make sure there's no overlap between scratch and the other
  // registers.
  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 1;

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  while (object != holder) {
    depth++;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

    // Get the map of the current object.
    ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    cmp(scratch, Operand(Handle<Map>(object->map())));

    // Branch on the result of the map check.
    b(ne, miss);

    // Check access rights to the global object.  This has to happen
    // after the map check so that we know that the object is
    // actually a global object.
    if (object->IsJSGlobalProxy()) {
      CheckAccessGlobalProxy(reg, scratch, miss);
      // Restore scratch register to be the map of the object.  In the
      // new space case below, we load the prototype from the map in
      // the scratch register.
      ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    }

    reg = holder_reg;  // From now on the object is in holder_reg.
    JSObject* prototype = JSObject::cast(object->GetPrototype());
    if (Heap::InNewSpace(prototype)) {
      // The prototype is in new space; we cannot store a reference
      // to it in the code. Load it from the map.
      ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
    } else {
      // The prototype is in old space; load it directly.
      mov(reg, Operand(Handle<JSObject>(prototype)));
    }

    // Go to the next object in the prototype chain.
    object = prototype;
  }

  // Check the holder map.
  ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
  cmp(scratch, Operand(Handle<Map>(object->map())));
  b(ne, miss);

  // Log the check depth.
  LOG(IntEvent("check-maps-depth", depth));

  // Perform the security check for access to the global object and return
  // the holder register.
  ASSERT(object == holder);
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
  if (object->IsJSGlobalProxy()) {
    CheckAccessGlobalProxy(reg, scratch, miss);
  }
  return reg;
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(ip));
  ASSERT(!scratch.is(ip));

  // Load the current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check that the context is a global context.
  if (FLAG_debug_code) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // We cannot use ip as a temporary in this verification code, because
    // ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check that the context is a global context.
  if (FLAG_debug_code) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // We cannot use ip as a temporary in this verification code, because
    // ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restoring ip is not needed here; ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to the holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  ASSERT(!result.is(scratch1));
  ASSERT(!scratch1.is(scratch2));

  // Load the address of the new object into result and the allocation top
  // address into scratch1.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();
  mov(scratch1, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldr(result, MemOperand(scratch1));
  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below, so this use of scratch2 does not make the register
    // contents differ between debug and release mode.
    ldr(scratch2, MemOperand(scratch1));
    cmp(result, scratch2);
    Check(eq, "Unexpected allocation top");
  }

  // Calculate the new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  mov(scratch2, Operand(new_space_allocation_limit));
  ldr(scratch2, MemOperand(scratch2));
  add(result, result, Operand(object_size * kPointerSize));
  cmp(result, Operand(scratch2));
  b(hi, gc_required);

  // Update the allocation top. result temporarily holds the new top.
  if (FLAG_debug_code) {
    tst(result, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(result, MemOperand(scratch1));

  // Tag and adjust back to the start of the new object.
  if ((flags & TAG_OBJECT) != 0) {
    sub(result, result, Operand((object_size * kPointerSize) -
                                kHeapObjectTag));
  } else {
    sub(result, result, Operand(object_size * kPointerSize));
  }
}
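
// A minimal usage sketch (hypothetical call site): allocating a tagged,
// fixed-size object of two words, with a bailout label for when new space
// is exhausted:
//   Label gc_required;
//   masm->AllocateInNewSpace(2, r0, r1, r2, &gc_required, TAG_OBJECT);
//   // r0 now holds the tagged object pointer.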


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  ASSERT(!result.is(scratch1));
  ASSERT(!scratch1.is(scratch2));

  // Load the address of the new object into result and the allocation top
  // address into scratch1.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();
  mov(scratch1, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldr(result, MemOperand(scratch1));
  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below, so this use of scratch2 does not make the register
    // contents differ between debug and release mode.
    ldr(scratch2, MemOperand(scratch1));
    cmp(result, scratch2);
    Check(eq, "Unexpected allocation top");
  }

  // Calculate the new top and bail out if new space is exhausted. Use result
  // to calculate the new top. The object size is in words, so a shift is
  // required to get the number of bytes.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  mov(scratch2, Operand(new_space_allocation_limit));
  ldr(scratch2, MemOperand(scratch2));
  add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
  cmp(result, Operand(scratch2));
  b(hi, gc_required);

  // Update the allocation top. result temporarily holds the new top.
  if (FLAG_debug_code) {
    tst(result, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(result, MemOperand(scratch1));

  // Adjust back to the start of the new object.
  sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));

  // Tag the object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();

  // Make sure the object has no tag before resetting top.
  and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object being un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, "Undo allocation of non allocated memory");
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
  add(scratch1, scratch1,
      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
  // AllocateInNewSpace expects the size in words, so we can round down
  // to kObjectAlignment and divide by kPointerSize in the same shift.
  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
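
  // Worked example (assuming a 12-byte SeqTwoByteString header and 4-byte
  // object alignment): length 3 gives 6 bytes of characters, 6 + 3 + 12 = 21,
  // and 21 >> 2 = 5 words, i.e. 20 bytes: the 18 bytes of header plus
  // characters rounded up to the next alignment boundary.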

  // Allocate the two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(scratch1, Heap::kStringMapRootIndex);
  str(length, FieldMemOperand(result, String::kLengthOffset));
  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  mov(scratch2, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
}


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  add(scratch1, length,
      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
  // AllocateInNewSpace expects the size in words, so we can round down
  // to kObjectAlignment and divide by kPointerSize in the same shift.
  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));

  // Allocate the ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
  str(length, FieldMemOperand(result, String::kLengthOffset));
  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  mov(scratch2, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize / kPointerSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
  mov(scratch2, Operand(String::kEmptyHashField));
  str(length, FieldMemOperand(result, String::kLengthOffset));
  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize / kPointerSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
  mov(scratch2, Operand(String::kEmptyHashField));
  str(length, FieldMemOperand(result, String::kLengthOffset));
  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
}


void MacroAssembler::CompareObjectType(Register function,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}


void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              bool is_heap_object) {
  if (!is_heap_object) {
    BranchOnSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  mov(ip, Operand(map));
  cmp(scratch, ip);
  b(ne, fail);
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  BranchOnSmi(function, miss);

  // Check that the function really is a function.  Load map into result reg.
  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  b(ne, miss);

  // Make sure that the function has an instance prototype.
  Label non_instance;
  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  cmp(result, ip);
  b(eq, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  b(ne, &done);

  // Get the prototype from the initial map.
  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  if (argc > 1) {
    add(sp, sp, Operand((argc - 1) * kPointerSize));
  }
  Ret();
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
  }
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                       Register outHighReg,
                                                       Register outLowReg) {
  // ARMv7 VFP3 instructions to implement integer to double conversion.
  mov(r7, Operand(inReg, ASR, kSmiTagSize));
  vmov(s15, r7);
  vcvt(d7, s15);
  vmov(outLowReg, outHighReg, d7);
}


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
  } else {
    mov(dst, Operand(src, ASR, kSmiTagSize));
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}


void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  // All parameters are on the stack.  r0 has the return value after the call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ExternalReference(f)));
  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
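
// A minimal usage sketch: with the arguments already pushed on the stack, a
// two-argument runtime call looks like
//   masm->CallRuntime(Runtime::kAbort, 2);
// (see Abort() below for this exact call site); r0 holds the result on return.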
   1225 
   1226 
   1227 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
   1228                                            int num_arguments) {
   1229   mov(r0, Operand(num_arguments));
   1230   mov(r1, Operand(ext));
   1231 
   1232   CEntryStub stub(1);
   1233   CallStub(&stub);
   1234 }
   1235 
   1236 
   1237 void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
   1238                                      int num_arguments,
   1239                                      int result_size) {
   1240   // TODO(1236192): Most runtime routines don't need the number of
   1241   // arguments passed in because it is constant. At some point we
   1242   // should remove this need and make the runtime routine entry code
   1243   // smarter.
   1244   mov(r0, Operand(num_arguments));
   1245   JumpToRuntime(ext);
   1246 }
   1247 
   1248 
   1249 void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
   1250 #if defined(__thumb__)
   1251   // Thumb mode builtin.
   1252   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
   1253 #endif
   1254   mov(r1, Operand(builtin));
   1255   CEntryStub stub(1);
   1256   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   1257 }
   1258 
   1259 
   1260 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
   1261                                    InvokeJSFlags flags) {
   1262   GetBuiltinEntry(r2, id);
   1263   if (flags == CALL_JS) {
   1264     Call(r2);
   1265   } else {
   1266     ASSERT(flags == JUMP_JS);
   1267     Jump(r2);
   1268   }
   1269 }
   1270 
   1271 
   1272 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   1273   // Load the JavaScript builtin function from the builtins object.
   1274   ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   1275   ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
   1276   int builtins_offset =
   1277       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
   1278   ldr(r1, FieldMemOperand(r1, builtins_offset));
   1279   // Load the code entry point from the function into the target register.
   1280   ldr(target, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   1281   ldr(target, FieldMemOperand(target, SharedFunctionInfo::kCodeOffset));
   1282   add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   1283 }
   1284 
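        // Summary of the pointer chase above: cp (current context) -> global
        // object -> builtins object -> JSFunction of the builtin -> its
        // SharedFunctionInfo -> its Code object, whose first instruction sits
        // Code::kHeaderSize bytes past the tagged object pointer. That entry
        // address ends up in |target|; note that r1 is clobbered on the way.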
   1285 
   1286 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
   1287                                 Register scratch1, Register scratch2) {
   1288   if (FLAG_native_code_counters && counter->Enabled()) {
   1289     mov(scratch1, Operand(value));
   1290     mov(scratch2, Operand(ExternalReference(counter)));
   1291     str(scratch1, MemOperand(scratch2));
   1292   }
   1293 }
   1294 
   1295 
   1296 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
   1297                                       Register scratch1, Register scratch2) {
   1298   ASSERT(value > 0);
   1299   if (FLAG_native_code_counters && counter->Enabled()) {
   1300     mov(scratch2, Operand(ExternalReference(counter)));
   1301     ldr(scratch1, MemOperand(scratch2));
   1302     add(scratch1, scratch1, Operand(value));
   1303     str(scratch1, MemOperand(scratch2));
   1304   }
   1305 }
   1306 
   1307 
   1308 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
   1309                                       Register scratch1, Register scratch2) {
   1310   ASSERT(value > 0);
   1311   if (FLAG_native_code_counters && counter->Enabled()) {
   1312     mov(scratch2, Operand(ExternalReference(counter)));
   1313     ldr(scratch1, MemOperand(scratch2));
   1314     sub(scratch1, scratch1, Operand(value));
   1315     str(scratch1, MemOperand(scratch2));
   1316   }
   1317 }
   1318 
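        // Illustrative sketch (hypothetical counter, not part of the original
        // source): given a StatsCounter* named |my_counter| and two free
        // registers, generated code bumps the counter cell like this:
        //
        //   IncrementCounter(my_counter, 1, r3, r4);
        //
        // The load/add/store sequence is only emitted when
        // FLAG_native_code_counters is set and the counter is enabled.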
   1319 
   1320 void MacroAssembler::Assert(Condition cc, const char* msg) {
   1321   if (FLAG_debug_code)
   1322     Check(cc, msg);
   1323 }
   1324 
   1325 
   1326 void MacroAssembler::Check(Condition cc, const char* msg) {
   1327   Label L;
   1328   b(cc, &L);
   1329   Abort(msg);
   1330   // Will not return here.
   1331   bind(&L);
   1332 }
   1333 
   1334 
   1335 void MacroAssembler::Abort(const char* msg) {
   1336   // We want to pass the msg string like a smi to avoid GC
   1337   // problems; however, msg is not guaranteed to be aligned
   1338   // properly. Instead, we pass an aligned pointer that is
   1339   // a proper v8 smi, but also pass the alignment difference
   1340   // from the real pointer as a smi.
   1341   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   1342   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
   1343   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
   1344 #ifdef DEBUG
   1345   if (msg != NULL) {
   1346     RecordComment("Abort message: ");
   1347     RecordComment(msg);
   1348   }
   1349 #endif
   1350   // Disable stub call restrictions to always allow calls to abort.
   1351   set_allow_stub_calls(true);
   1352 
   1353   mov(r0, Operand(p0));
   1354   push(r0);
   1355   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   1356   push(r0);
   1357   CallRuntime(Runtime::kAbort, 2);
   1358   // Will not return here.
   1359 }
   1360 
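        // Worked example of the tagging trick in Abort() (assuming kSmiTag == 0
        // and kSmiTagMask == 1, as asserted elsewhere in this file): if msg
        // lives at address 0x40001235, then p0 == 0x40001234 has a clear low
        // bit and therefore looks like a valid smi to the GC, while
        // p1 - p0 == 1 travels alongside it as Smi::FromInt(1) so the runtime
        // can reconstruct the real pointer as p0 + 1.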
   1361 
   1362 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   1363   if (context_chain_length > 0) {
   1364     // Move up the chain of contexts to the context containing the slot.
   1365     ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
   1366     // Load the function context (which is the incoming, outer context).
   1367     ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
   1368     for (int i = 1; i < context_chain_length; i++) {
   1369       ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
   1370       ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
   1371     }
   1372     // The context may be an intermediate context, not a function context.
   1373     ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   1374   } else {  // Slot is in the current function context.
   1375     // The context may be an intermediate context, not a function context.
   1376     ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
   1377   }
   1378 }
   1379 
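        // Illustrative walk (a reading aid, not part of the original source):
        // LoadContext(r0, 2) performs two CLOSURE_INDEX -> kContextOffset hops
        // starting from cp and then loads the FCONTEXT_INDEX slot, leaving r0
        // holding the function context two levels up the static chain.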
   1380 
   1381 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
   1382                                       Register reg2,
   1383                                       Label* on_not_both_smi) {
   1384   ASSERT_EQ(0, kSmiTag);
   1385   tst(reg1, Operand(kSmiTagMask));
   1386   tst(reg2, Operand(kSmiTagMask), eq);
   1387   b(ne, on_not_both_smi);
   1388 }
   1389 
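        // Note on the predicated tst above (a reading aid, not part of the
        // original source): the second tst only executes when the first one
        // set the Z flag, i.e. when reg1 is a smi given kSmiTag == 0. The
        // final ne branch therefore fires when at least one register has its
        // tag bit set, which is exactly "not both smis".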
   1390 
   1391 void MacroAssembler::JumpIfEitherSmi(Register reg1,
   1392                                      Register reg2,
   1393                                      Label* on_either_smi) {
   1394   ASSERT_EQ(0, kSmiTag);
   1395   tst(reg1, Operand(kSmiTagMask));
   1396   tst(reg2, Operand(kSmiTagMask), ne);
   1397   b(eq, on_either_smi);
   1398 }
   1399 
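        // Mirror of the trick above: the second tst is predicated on ne (reg1
        // is not a smi), so the eq branch fires when reg1 is a smi, or when
        // reg1 is a non-smi but reg2 is a smi, i.e. when either one is a smi.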
   1400 
   1401 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
   1402     Register first,
   1403     Register second,
   1404     Register scratch1,
   1405     Register scratch2,
   1406     Label* failure) {
   1407   // Test that both first and second are sequential ASCII strings.
   1408   // Assume that they are non-smis.
   1409   ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   1410   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   1411   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   1412   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
   1413   int kFlatAsciiStringMask =
   1414       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   1415   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
   1416   and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
   1417   and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
   1418   cmp(scratch1, Operand(kFlatAsciiStringTag));
   1419   // Ignore second test if first test failed.
   1420   cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
   1421   b(ne, failure);
   1422 }
   1423 
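        // The mask/tag pair above relies on ASCII_STRING_TYPE encoding
        // "string, sequential, ASCII" in exactly the bits selected by
        // kIsNotStringMask, kStringRepresentationMask and kStringEncodingMask,
        // so a single masked compare per register suffices.
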
   1424 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
   1425                                                          Register second,
   1426                                                          Register scratch1,
   1427                                                          Register scratch2,
   1428                                                          Label* failure) {
   1429   // Check that neither is a smi.
   1430   ASSERT_EQ(0, kSmiTag);
   1431   and_(scratch1, first, Operand(second));
   1432   tst(scratch1, Operand(kSmiTagMask));
   1433   b(eq, failure);
   1434   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
   1435                                              second,
   1436                                              scratch1,
   1437                                              scratch2,
   1438                                              failure);
   1439 }
   1440 
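        // Note on the combined smi check above (a reading aid, not part of the
        // original source): with kSmiTag == 0 a smi has a clear low bit, so
        // the AND of the two values has a clear low bit exactly when at least
        // one operand is a smi. One tst therefore guards both registers.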
   1441 
   1442 #ifdef ENABLE_DEBUGGER_SUPPORT
   1443 CodePatcher::CodePatcher(byte* address, int instructions)
   1444     : address_(address),
   1445       instructions_(instructions),
   1446       size_(instructions * Assembler::kInstrSize),
   1447       masm_(address, size_ + Assembler::kGap) {
   1448   // Create a new macro assembler pointing to the address of the code to patch.
   1449   // The size is adjusted with kGap in order for the assembler to generate size
   1450   // bytes of instructions without running into buffer size constraints.
   1451   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   1452 }
   1453 
   1454 
   1455 CodePatcher::~CodePatcher() {
   1456   // Indicate that code has changed.
   1457   CPU::FlushICache(address_, size_);
   1458 
   1459   // Check that the code was patched as expected.
   1460   ASSERT(masm_.pc_ == address_ + size_);
   1461   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
   1462 }
   1463 
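        // Illustrative sketch (hypothetical usage, not part of the original
        // source): patching a single instruction in place might look like
        //
        //   CodePatcher patcher(pc_of_instruction, 1);
        //   patcher.Emit(new_instr);
        //
        // The destructor then flushes the instruction cache for the patched
        // range and, in debug mode, checks that exactly |instructions_|
        // instructions were emitted.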
   1464 
   1465 void CodePatcher::Emit(Instr x) {
   1466   masm()->emit(x);
   1467 }
   1468 
   1469 
   1470 void CodePatcher::Emit(Address addr) {
   1471   masm()->emit(reinterpret_cast<Instr>(addr));
   1472 }
   1473 #endif  // ENABLE_DEBUGGER_SUPPORT
   1474 
   1475 
   1476 } }  // namespace v8::internal
   1477