// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
  Label check_heap_number, call_builtin;
  __ tst(r0, Operand(kSmiTagMask));
  __ b(ne, &check_heap_number);
  __ Ret();

  __ bind(&check_heap_number);
  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &call_builtin);
  __ Ret();

  __ bind(&call_builtin);
  __ push(r0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(r3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
  __ Push(cp, r3, r4);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Setup the object header.
  __ LoadRoot(r2, Heap::kContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // Setup the fixed slots.
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the surrounding context.
  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into r3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r3, ip);
  __ b(eq, &slow_case);

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(r3);
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
    __ LoadRoot(ip, expected_map_index);
    __ cmp(r3, ip);
    __ Assert(eq, message);
    __ pop(r3);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        r0,
                        r1,
                        r2,
                        &slow_case,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ ldr(r1, FieldMemOperand(r3, i));
      __ str(r1, FieldMemOperand(r0, i));
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and setup the
    // elements pointer in the resulting object.
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ add(r2, r0, Operand(JSArray::kSize));
    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));

    // Copy the elements array.
    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
  }

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
// scratch register.  Destroys the source register.  No GC occurs during this
// stub so you don't have to set up the frame.
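// For illustration (standard IEEE 754, not specific to this stub): 1.0 is
// encoded with biased exponent 1023 (0x3FF) and an all-zero stored fraction,
// so its two words are 0x3FF00000 (sign, exponent, top of mantissa) and
// 0x00000000 (low mantissa).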
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
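    // Each ARM core register code fits in 4 bits (r0-r15 are 0-15), so the
    // four codes pack without collision. For example (illustrative only),
    // result1 = r3, result2 = r2, source = r7 and zeros = r6 gives
    // 3 + (2 << 4) + (7 << 8) + (6 << 12) = 0x6723.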
    return  result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "ConvertToDoubleStub"; }

#ifdef DEBUG
  void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  Register exponent = result1_;
  Register mantissa = result2_;

  Label not_special;
  // Convert from Smi to integer.
  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
  // Move sign bit from source to destination.  This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // the absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand(0, RelocInfo::NONE));
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ CountLeadingZeros(zeros_, source_, mantissa);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.  Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
  // that fit in the ARM's constant field.
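  // (0x41e is not encodable as a single ARM immediate, but both
  // 0x41e - 0x400 = 0x1e and 0x400 are, so the constant is applied by the
  // two instructions below.)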
  int fudge = 0x400;
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}
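
// Worked example (illustrative): converting the Smi value 5. 5 = 101b has
// 29 leading zeros, so the exponent is 31 - 29 + 1023 = 1025 (0x401).
// Shifting away the implicit leading 1 leaves fraction bits 01, giving the
// result words 0x40140000 and 0x00000000, i.e. the IEEE double 5.0.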


class FloatingPointHelper : public AllStatic {
 public:

  enum Destination {
    kVFPRegisters,
    kCoreRegisters
  };


  // Loads smis from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
  // is floating point registers VFP3 must be supported. If core registers are
  // requested when VFP3 is supported d6 and d7 will be scratched.
  static void LoadSmis(MacroAssembler* masm,
                       Destination destination,
                       Register scratch1,
                       Register scratch2);

  // Loads objects from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
  // is floating point registers VFP3 must be supported. If core registers are
  // requested when VFP3 is supported d6 and d7 will still be scratched. If
  // either r0 or r1 is not a number (not smi and not heap number object) the
  // not_number label is jumped to with r0 and r1 intact.
  static void LoadOperands(MacroAssembler* masm,
                           FloatingPointHelper::Destination destination,
                           Register heap_number_map,
                           Register scratch1,
                           Register scratch2,
                           Label* not_number);

  // Convert the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1.
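  // For example, ToInt32(4.7) is 4 and ToInt32(-4.7) is -4 (truncation
  // towards zero), while ToInt32(2^31) wraps around to -2^31.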
  static void ConvertNumberToInt32(MacroAssembler* masm,
                                   Register object,
                                   Register dst,
                                   Register heap_number_map,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   DwVfpRegister double_scratch,
                                   Label* not_int32);

  // Load the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  static void LoadNumberAsInt32Double(MacroAssembler* masm,
                                      Register object,
                                      Destination destination,
                                      DwVfpRegister double_dst,
                                      Register dst1,
                                      Register dst2,
                                      Register heap_number_map,
                                      Register scratch1,
                                      Register scratch2,
                                      SwVfpRegister single_scratch,
                                      Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  // scratch3 is not used when VFP3 is supported.
  static void LoadNumberAsInt32(MacroAssembler* masm,
                                Register object,
                                Register dst,
                                Register heap_number_map,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
                                DwVfpRegister double_scratch,
                                Label* not_int32);

  // Generate non-VFP3 code to check if a double can be exactly represented by
  // a 32-bit integer. This does not check for 0 or -0, which need
  // to be checked for separately.
  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
  // through otherwise.
  // src1 and src2 will be clobbered.
  //
  // Expected input:
  // - src1: higher (exponent) part of the double value.
  // - src2: lower (mantissa) part of the double value.
  // Output status:
  // - dst: higher 32 bits of the mantissa. (mantissa[51:20])
  // - src2: contains 1.
  // - other registers are clobbered.
  static void DoubleIs32BitInteger(MacroAssembler* masm,
                                   Register src1,
                                   Register src2,
                                   Register dst,
                                   Register scratch,
                                   Label* not_int32);

  // Generates code to call a C function to do a double operation using core
  // registers. (Used when VFP3 is not supported.)
  // This code never falls through, but returns with a heap number containing
  // the result in r0.
  // Register heapnumber_result must be a heap number in which the
  // result of the operation will be stored.
  // Requires the following layout on entry:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
                                          Token::Value op,
                                          Register heap_number_result,
                                          Register scratch);

 private:
  static void LoadNumber(MacroAssembler* masm,
                         FloatingPointHelper::Destination destination,
                         Register object,
                         DwVfpRegister dst,
                         Register dst1,
                         Register dst2,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         Label* not_number);
};


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
    __ vmov(d7.high(), scratch1);
    __ vcvt_f64_s32(d7, d7.high());
    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
    __ vmov(d6.high(), scratch1);
    __ vcvt_f64_s32(d6, d6.high());
    if (destination == kCoreRegisters) {
      __ vmov(r2, r3, d7);
      __ vmov(r0, r1, d6);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from r0 to r3 and r2 in double format.
    __ mov(scratch1, Operand(r0));
    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
    __ push(lr);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Write Smi from r1 to r1 and r0 in double format.
    __ mov(scratch1, Operand(r1));
    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (r0) to d7 or r2/r3.
  LoadNumber(masm, destination,
             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (r1) to d6 or r0/r1.
  LoadNumber(masm, destination,
             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     DwVfpRegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(VFP3) &&
      destination == kVFPRegisters) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber to double register.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }
  __ jmp(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi to double using VFP instructions.
    __ SmiUntag(scratch1, object);
    __ vmov(dst.high(), scratch1);
    __ vcvt_f64_s32(dst, dst.high());
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ vmov(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, Operand(object));
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(lr);
    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               DwVfpRegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi;
  Label done;
  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ cmp(scratch1, heap_number_map);
  __ b(ne, not_number);
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);
  __ jmp(&done);

  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DwVfpRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  SwVfpRegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(single_scratch, scratch1);
    __ vcvt_f64_s32(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |         dst2            |         dst1            |
    // | s |   exp   |              mantissa               |

    // Check for zero.
    __ cmp(scratch1, Operand(0));
    __ mov(dst2, scratch1);
    __ mov(dst1, scratch1);
    __ b(eq, &done);

    // Preload the sign of the value.
    __ and_(dst2, scratch1, Operand(HeapNumber::kSignMask), SetCC);
    // Get the absolute value of the object (as an unsigned integer).
    __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ CountLeadingZeros(dst1, scratch1, scratch2);
    __ rsb(dst1, dst1, Operand(31));

    // Set the exponent.
    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Bfi(dst2, scratch2, scratch2,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non-null bit.
    __ mov(scratch2, Operand(1));
    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst1));

    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    // Get the number of bits to set in the lower part of the mantissa.
    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
    __ b(mi, &fewer_than_20_useful_bits);
    // Set the higher 20 bits of the mantissa.
    __ orr(dst2, dst2, Operand(scratch1, LSR, scratch2));
    __ rsb(scratch2, scratch2, Operand(32));
    __ mov(dst1, Operand(scratch1, LSL, scratch2));
    __ b(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ mov(scratch2, Operand(scratch1, LSL, scratch2));
    __ orr(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, Operand(0));
  }

  __ b(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ b(ne, not_int32);

    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));

    // Check for 0 and -0.
    __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
    __ orr(scratch1, scratch1, Operand(dst2));
    __ cmp(scratch1, Operand(0));
    __ b(eq, &done);

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DwVfpRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    SwVfpRegister single_scratch = double_scratch.low();
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ b(ne, not_int32);
    // Get the result in the destination register.
    __ vmov(dst, single_scratch);

  } else {
    // Load the double value in the destination registers.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
    __ orr(dst, scratch2, Operand(dst));
    __ cmp(dst, Operand(0));
    __ b(eq, &done);

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger:
    // dst: mantissa[51:20].
    // scratch2: 1.

    // Shift back the higher bits of the mantissa.
    __ mov(dst, Operand(dst, LSR, scratch3));
    // Set the implicit first bit.
    __ rsb(scratch3, scratch3, Operand(32));
    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
    // Set the sign.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ tst(scratch1, Operand(HeapNumber::kSignMask));
    __ rsb(dst, dst, Operand(0), LeaveCC, mi);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ubfx(scratch,
          src1,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponents cannot yield 32-bit integers.
  __ b(mi, not_int32);
  // Exponents greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
  __ sub(tmp, scratch, Operand(src1, LSR, 31));
  __ cmp(tmp, Operand(30));
  __ b(gt, not_int32);
  // The value is also not an int32 if bits [21:0] of the mantissa are not
  // null: with an exponent of at most 30, those are fraction bits.
  __ tst(src2, Operand(0x3fffff));
  __ b(ne, not_int32);

  // Otherwise the exponent needs to be big enough to shift all the
  // non-zero bits out of the fraction. So we need the (30 - exponent) last
  // bits of the 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ubfx(dst,
          src2,
          HeapNumber::kMantissaBitsInTopWord,
          32 - HeapNumber::kMantissaBitsInTopWord);
  __ orr(dst,
         dst,
         Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));

  // Create the mask and test the lower bits (of the higher bits).
  __ rsb(scratch, scratch, Operand(32));
  __ mov(src2, Operand(1));
  __ mov(src1, Operand(src2, LSL, scratch));
  __ sub(src1, src1, Operand(1));
  __ tst(dst, src1);
  __ b(ne, not_int32);
}
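
// Illustrative cases for DoubleIs32BitInteger: 0.5 (unbiased exponent -1)
// fails the negative-exponent check; 2^31 (exponent 31, sign bit 0) fails
// the (exponent - signbit) > 30 test while -2^31 (exponent 31, sign bit 1)
// passes it; 6.5 (exponent 2) is rejected because non-null fraction bits
// remain in the mantissa.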


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is callee-saved.
  // We currently always use r5 to pass it.
  ASSERT(heap_number_result.is(r5));

  // Push the current return address before the C call. Return will be
  // through pop(pc) below.
  __ push(lr);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  // Call C routine that may not cause GC or other trouble.
  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
                   4);
  // Store answer in the overwritable heap number. Double returned in
  // registers r0 and r1.
  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
                                  HeapNumber::kValueOffset));
  // Place heap_number_result in r0 and return to the pushed return address.
  __ mov(r0, Operand(heap_number_result));
  __ pop(pc);
}


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer, which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.  This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_.  All non-Smi int32s have the
  // same exponent.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
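  // With kExponentBias = 1023 and kExponentShift = 20 this is
  // (1023 + 30) << 20 = 0x41D00000, the exponent word shared by all
  // non-Smi int32s (before the sign bit is set).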
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
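  // The exponent word becomes kSignMask | ((1023 + 31) << 20), i.e.
  // 0xC1E00000, which together with a zero mantissa encodes -2^31 exactly.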
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand(0, RelocInfo::NONE));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
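// (In JavaScript terms: x == x is true for any x except NaN, where
// NaN == NaN is false, so the identical-objects fast path must still
// check for NaN.)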
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // The two objects are identical.  If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cond != eq || !never_nan_nan) {
    // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so neither of them is a
    // Smi.  If it's not a heap number, then return equal.
    if (cond == lt || cond == gt) {
      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
      __ b(ge, slow);
    } else {
      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
      __ b(eq, &heap_number);
      // Comparing JS objects with <=, >= is complicated.
      if (cond != eq) {
        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
        __ b(ge, slow);
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
        if (cond == le || cond == ge) {
          __ cmp(r4, Operand(ODDBALL_TYPE));
          __ b(ne, &return_equal);
          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
          __ cmp(r0, r2);
          __ b(ne, &return_equal);
          if (cond == le) {
            // undefined <= undefined should fail.
            __ mov(r0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ mov(r0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cond != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless.  For the others here is some code to check
    // for NaN.
    if (cond != lt && cond != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
      // Read top bits of double representation (second word of value).
      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs have all-one exponents so they sign extend to -1.
      __ cmp(r3, Operand(-1));
      __ b(ne, &return_equal);

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
      // Or with all low-bits of mantissa.
      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
      __ orr(r0, r3, Operand(r2), SetCC);
      // For equal we already have the right value in r0:  Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
      // value if it's a NaN.
      if (cond != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq);
        if (cond == le) {
          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ tst(rhs, Operand(kSmiTagMask));
  __ b(eq, &rhs_is_smi);

  // Lhs is a Smi.  Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r0 then there is already a non-zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
    CpuFeatures::Scope scope(VFP3);
    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
    // Load the double from rhs, tagged HeapNumber r0, to d6.
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
  } else {
    __ push(lr);
    // Convert lhs to a double in r2, r3.
    __ mov(r7, Operand(lhs));
    ConvertToDoubleStub stub1(r3, r2, r7, r6);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Load rhs to a double in r0, r1.
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    __ pop(lr);
  }

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r0 then there is already a non-zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from lhs, tagged HeapNumber r1, to d7.
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
    // Convert rhs to a double in d6.
    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
  } else {
    __ push(lr);
    // Load lhs to a double in r2, r3.
    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    // Convert rhs to a double in r0, r1.
    __ mov(r7, Operand(rhs));
    ConvertToDoubleStub stub2(r1, r0, r7, r6);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }
  // Fall through to both_loaded_as_doubles.
}
   1203 
   1204 
   1205 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
   1206   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   1207   Register rhs_exponent = exp_first ? r0 : r1;
   1208   Register lhs_exponent = exp_first ? r2 : r3;
   1209   Register rhs_mantissa = exp_first ? r1 : r0;
   1210   Register lhs_mantissa = exp_first ? r3 : r2;
   1211   Label one_is_nan, neither_is_nan;
   1212 
   1213   __ Sbfx(r4,
   1214           lhs_exponent,
   1215           HeapNumber::kExponentShift,
   1216           HeapNumber::kExponentBits);
   1217   // NaNs have all-one exponents so they sign extend to -1.
   1218   __ cmp(r4, Operand(-1));
   1219   __ b(ne, lhs_not_nan);
   1220   __ mov(r4,
   1221          Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
   1222          SetCC);
   1223   __ b(ne, &one_is_nan);
   1224   __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
   1225   __ b(ne, &one_is_nan);
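           // Illustration (IEEE-754 double: 1 sign, 11 exponent, 52 mantissa
           // bits): a quiet NaN such as 0x7FF8000000000000 has exponent 0x7FF,
           // which the Sbfx above sign-extends to -1, plus a non-zero
           // mantissa. Infinity (0x7FF0000000000000) has the same exponent but
           // an all-zero mantissa, so only when both mantissa checks find zero
           // bits do we know the value is an Infinity rather than a NaN.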
   1226 
   1227   __ bind(lhs_not_nan);
   1228   __ Sbfx(r4,
   1229           rhs_exponent,
   1230           HeapNumber::kExponentShift,
   1231           HeapNumber::kExponentBits);
   1232   // NaNs have all-one exponents so they sign extend to -1.
   1233   __ cmp(r4, Operand(-1));
   1234   __ b(ne, &neither_is_nan);
   1235   __ mov(r4,
   1236          Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
   1237          SetCC);
   1238   __ b(ne, &one_is_nan);
   1239   __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
   1240   __ b(eq, &neither_is_nan);
   1241 
   1242   __ bind(&one_is_nan);
   1243   // NaN comparisons always fail.
   1244   // Load whatever we need in r0 to make the comparison fail.
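           // E.g. for lt or le we return GREATER (+1), so the caller's "less
           // than" test comes out false, as any comparison with a NaN must.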
   1245   if (cond == lt || cond == le) {
   1246     __ mov(r0, Operand(GREATER));
   1247   } else {
   1248     __ mov(r0, Operand(LESS));
   1249   }
   1250   __ Ret();
   1251 
   1252   __ bind(&neither_is_nan);
   1253 }
   1254 
   1255 
   1256 // See comment at call site.
   1257 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
   1258                                           Condition cond) {
   1259   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   1260   Register rhs_exponent = exp_first ? r0 : r1;
   1261   Register lhs_exponent = exp_first ? r2 : r3;
   1262   Register rhs_mantissa = exp_first ? r1 : r0;
   1263   Register lhs_mantissa = exp_first ? r3 : r2;
   1264 
   1265   // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
   1266   if (cond == eq) {
   1267     // Doubles are not equal unless they have the same bit pattern.
   1268     // Exception: 0 and -0.
   1269     __ cmp(rhs_mantissa, Operand(lhs_mantissa));
   1270     __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
   1271     // Return non-zero if the numbers are unequal.
   1272     __ Ret(ne);
   1273 
   1274     __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
   1275     // If exponents are equal then return 0.
   1276     __ Ret(eq);
   1277 
   1278     // Exponents are unequal.  The only way we can return that the numbers
   1279     // are equal is if one is -0 and the other is 0.  We already dealt
   1280     // with the case where both are -0 or both are 0.
    1281     // We start by seeing if the mantissas (that are equal) or the bottom
    1282     // 31 bits of the lhs exponent are non-zero.  If so we return not
    1283     // equal.
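             // Bit patterns: +0.0 is 0x00000000 00000000 and -0.0 is
             // 0x80000000 00000000, so with the mantissa words already known
             // to be equal, the two values can only differ in the exponent
             // word's sign bit, which the LSL by kSmiTagSize (1) discards.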
   1284     __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
   1285     __ mov(r0, Operand(r4), LeaveCC, ne);
   1286     __ Ret(ne);
    1287     // Now they are equal if and only if the rhs exponent is zero in its
    1288     // low 31 bits.
   1289     __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
   1290     __ Ret();
   1291   } else {
   1292     // Call a native function to do a comparison between two non-NaNs.
   1293     // Call C routine that may not cause GC or other trouble.
   1294     __ push(lr);
   1295     __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
   1296     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
   1297     __ pop(pc);  // Return.
   1298   }
   1299 }
   1300 
   1301 
   1302 // See comment at call site.
   1303 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
   1304                                            Register lhs,
   1305                                            Register rhs) {
    1306   ASSERT((lhs.is(r0) && rhs.is(r1)) ||
    1307          (lhs.is(r1) && rhs.is(r0)));
    1308 
    1309   // If either operand is a JSObject or an oddball value, then they are
    1310   // not equal since their pointers are different.
    1311   // There is no test for undetectability in strict equality.
    1312   STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
    1313   Label first_non_object;
    1314   // Get the type of the first operand into r2 and compare it with
    1315   // FIRST_JS_OBJECT_TYPE.
    1316   __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
    1317   __ b(lt, &first_non_object);
    1318 
    1319   // Return non-zero (r0 is not zero).
    1320   Label return_not_equal;
    1321   __ bind(&return_not_equal);
    1322   __ Ret();
    1323 
    1324   __ bind(&first_non_object);
    1325   // Check for oddballs: true, false, null, undefined.
    1326   __ cmp(r2, Operand(ODDBALL_TYPE));
    1327   __ b(eq, &return_not_equal);
    1328 
    1329   __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
    1330   __ b(ge, &return_not_equal);
    1331 
    1332   // Check for oddballs: true, false, null, undefined.
    1333   __ cmp(r3, Operand(ODDBALL_TYPE));
    1334   __ b(eq, &return_not_equal);
    1335 
    1336   // Now that we have the types we might as well check for symbol-symbol.
    1337   // Ensure that no non-strings have the symbol bit set.
    1338   STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
    1339   STATIC_ASSERT(kSymbolTag != 0);
    1340   __ and_(r2, r2, Operand(r3));
    1341   __ tst(r2, Operand(kIsSymbolMask));
    1342   __ b(ne, &return_not_equal);
   1343 }
   1344 
   1345 
   1346 // See comment at call site.
   1347 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
   1348                                        Register lhs,
   1349                                        Register rhs,
   1350                                        Label* both_loaded_as_doubles,
   1351                                        Label* not_heap_numbers,
   1352                                        Label* slow) {
   1353   ASSERT((lhs.is(r0) && rhs.is(r1)) ||
   1354          (lhs.is(r1) && rhs.is(r0)));
   1355 
   1356   __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
   1357   __ b(ne, not_heap_numbers);
   1358   __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
   1359   __ cmp(r2, r3);
   1360   __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
   1361 
   1362   // Both are heap numbers.  Load them up then jump to the code we have
   1363   // for that.
   1364   if (CpuFeatures::IsSupported(VFP3)) {
   1365     CpuFeatures::Scope scope(VFP3);
   1366     __ sub(r7, rhs, Operand(kHeapObjectTag));
   1367     __ vldr(d6, r7, HeapNumber::kValueOffset);
   1368     __ sub(r7, lhs, Operand(kHeapObjectTag));
   1369     __ vldr(d7, r7, HeapNumber::kValueOffset);
   1370   } else {
   1371     __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
   1372     __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
   1373   }
   1374   __ jmp(both_loaded_as_doubles);
   1375 }
   1376 
   1377 
   1378 // Fast negative check for symbol-to-symbol equality.
   1379 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
   1380                                          Register lhs,
   1381                                          Register rhs,
   1382                                          Label* possible_strings,
   1383                                          Label* not_both_strings) {
   1384   ASSERT((lhs.is(r0) && rhs.is(r1)) ||
   1385          (lhs.is(r1) && rhs.is(r0)));
   1386 
   1387   // r2 is object type of rhs.
   1388   // Ensure that no non-strings have the symbol bit set.
   1389   Label object_test;
   1390   STATIC_ASSERT(kSymbolTag != 0);
   1391   __ tst(r2, Operand(kIsNotStringMask));
   1392   __ b(ne, &object_test);
   1393   __ tst(r2, Operand(kIsSymbolMask));
   1394   __ b(eq, possible_strings);
   1395   __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
   1396   __ b(ge, not_both_strings);
   1397   __ tst(r3, Operand(kIsSymbolMask));
   1398   __ b(eq, possible_strings);
   1399 
   1400   // Both are symbols.  We already checked they weren't the same pointer
   1401   // so they are not equal.
   1402   __ mov(r0, Operand(NOT_EQUAL));
   1403   __ Ret();
   1404 
   1405   __ bind(&object_test);
   1406   __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
   1407   __ b(lt, not_both_strings);
   1408   __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
   1409   __ b(lt, not_both_strings);
   1410   // If both objects are undetectable, they are equal. Otherwise, they
   1411   // are not equal, since they are different objects and an object is not
   1412   // equal to undefined.
   1413   __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
   1414   __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
   1415   __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
   1416   __ and_(r0, r2, Operand(r3));
   1417   __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
   1418   __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
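           // r0 is now zero (EQUAL) exactly when both bit fields had the
           // undetectable bit set; otherwise the eor leaves the mask bit
           // standing, which reads as not-equal.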
   1419   __ Ret();
   1420 }
   1421 
   1422 
   1423 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   1424                                                          Register object,
   1425                                                          Register result,
   1426                                                          Register scratch1,
   1427                                                          Register scratch2,
   1428                                                          Register scratch3,
   1429                                                          bool object_is_smi,
   1430                                                          Label* not_found) {
    1431   // Register usage: result is also used as a temporary during the lookup.
   1432   Register number_string_cache = result;
   1433   Register mask = scratch3;
   1434 
   1435   // Load the number string cache.
   1436   __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
   1437 
   1438   // Make the hash mask from the length of the number string cache. It
   1439   // contains two elements (number and string) for each cache entry.
   1440   __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
   1441   // Divide length by two (length is a smi).
   1442   __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
   1443   __ sub(mask, mask, Operand(1));  // Make mask.
   1444 
   1445   // Calculate the entry in the number string cache. The hash value in the
   1446   // number string cache for smis is just the smi value, and the hash for
   1447   // doubles is the xor of the upper and lower words. See
   1448   // Heap::GetNumberStringCache.
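           // For example, a cache with 64 entries has a FixedArray length of
           // 128 and thus a mask of 63; the smi 17 then probes entry
           // 17 & 63 == 17, while a heap number probes at
           // (low word ^ high word) & 63.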
   1449   Isolate* isolate = masm->isolate();
   1450   Label is_smi;
   1451   Label load_result_from_cache;
   1452   if (!object_is_smi) {
   1453     __ JumpIfSmi(object, &is_smi);
   1454     if (CpuFeatures::IsSupported(VFP3)) {
   1455       CpuFeatures::Scope scope(VFP3);
   1456       __ CheckMap(object,
   1457                   scratch1,
   1458                   Heap::kHeapNumberMapRootIndex,
   1459                   not_found,
   1460                   true);
   1461 
   1462       STATIC_ASSERT(8 == kDoubleSize);
   1463       __ add(scratch1,
   1464              object,
   1465              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
   1466       __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
   1467       __ eor(scratch1, scratch1, Operand(scratch2));
   1468       __ and_(scratch1, scratch1, Operand(mask));
   1469 
   1470       // Calculate address of entry in string cache: each entry consists
   1471       // of two pointer sized fields.
   1472       __ add(scratch1,
   1473              number_string_cache,
   1474              Operand(scratch1, LSL, kPointerSizeLog2 + 1));
   1475 
   1476       Register probe = mask;
   1477       __ ldr(probe,
   1478              FieldMemOperand(scratch1, FixedArray::kHeaderSize));
   1479       __ JumpIfSmi(probe, not_found);
   1480       __ sub(scratch2, object, Operand(kHeapObjectTag));
   1481       __ vldr(d0, scratch2, HeapNumber::kValueOffset);
   1482       __ sub(probe, probe, Operand(kHeapObjectTag));
   1483       __ vldr(d1, probe, HeapNumber::kValueOffset);
   1484       __ VFPCompareAndSetFlags(d0, d1);
   1485       __ b(ne, not_found);  // The cache did not contain this value.
   1486       __ b(&load_result_from_cache);
   1487     } else {
   1488       __ b(not_found);
   1489     }
   1490   }
   1491 
   1492   __ bind(&is_smi);
   1493   Register scratch = scratch1;
   1494   __ and_(scratch, mask, Operand(object, ASR, 1));
   1495   // Calculate address of entry in string cache: each entry consists
   1496   // of two pointer sized fields.
   1497   __ add(scratch,
   1498          number_string_cache,
   1499          Operand(scratch, LSL, kPointerSizeLog2 + 1));
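           // "object ASR 1" undid the smi tag (kSmiTagSize == 1), and the
           // shift by kPointerSizeLog2 + 1 scales the masked hash by the byte
           // size of a two-pointer cache entry (8 bytes on this 32-bit
           // target).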
   1500 
   1501   // Check if the entry is the smi we are looking for.
   1502   Register probe = mask;
   1503   __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   1504   __ cmp(object, probe);
   1505   __ b(ne, not_found);
   1506 
   1507   // Get the result from the cache.
   1508   __ bind(&load_result_from_cache);
   1509   __ ldr(result,
   1510          FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
   1511   __ IncrementCounter(isolate->counters()->number_to_string_native(),
   1512                       1,
   1513                       scratch1,
   1514                       scratch2);
   1515 }
   1516 
   1517 
   1518 void NumberToStringStub::Generate(MacroAssembler* masm) {
   1519   Label runtime;
   1520 
   1521   __ ldr(r1, MemOperand(sp, 0));
   1522 
   1523   // Generate code to lookup number in the number string cache.
   1524   GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
   1525   __ add(sp, sp, Operand(1 * kPointerSize));
   1526   __ Ret();
   1527 
   1528   __ bind(&runtime);
   1529   // Handle number to string in the runtime system if not found in the cache.
   1530   __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
   1531 }
   1532 
   1533 
   1534 // On entry lhs_ and rhs_ are the values to be compared.
   1535 // On exit r0 is 0, positive or negative to indicate the result of
   1536 // the comparison.
   1537 void CompareStub::Generate(MacroAssembler* masm) {
   1538   ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
   1539          (lhs_.is(r1) && rhs_.is(r0)));
   1540 
   1541   Label slow;  // Call builtin.
   1542   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
   1543 
   1544   if (include_smi_compare_) {
   1545     Label not_two_smis, smi_done;
   1546     __ orr(r2, r1, r0);
   1547     __ tst(r2, Operand(kSmiTagMask));
   1548     __ b(ne, &not_two_smis);
   1549     __ mov(r1, Operand(r1, ASR, 1));
   1550     __ sub(r0, r1, Operand(r0, ASR, 1));
   1551     __ Ret();
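             // Untagging both operands with ASR 1 and subtracting leaves a
             // negative, zero, or positive integer in r0, which is exactly
             // the encoding the caller of CompareStub expects.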
   1552     __ bind(&not_two_smis);
   1553   } else if (FLAG_debug_code) {
   1554     __ orr(r2, r1, r0);
   1555     __ tst(r2, Operand(kSmiTagMask));
   1556     __ Assert(ne, "CompareStub: unexpected smi operands.");
   1557   }
   1558 
   1559   // NOTICE! This code is only reached after a smi-fast-case check, so
   1560   // it is certain that at least one operand isn't a smi.
   1561 
   1562   // Handle the case where the objects are identical.  Either returns the answer
   1563   // or goes to slow.  Only falls through if the objects were not identical.
   1564   EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
   1565 
   1566   // If either is a Smi (we know that not both are), then they can only
   1567   // be strictly equal if the other is a HeapNumber.
   1568   STATIC_ASSERT(kSmiTag == 0);
   1569   ASSERT_EQ(0, Smi::FromInt(0));
   1570   __ and_(r2, lhs_, Operand(rhs_));
   1571   __ tst(r2, Operand(kSmiTagMask));
   1572   __ b(ne, &not_smis);
   1573   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
   1574   // 1) Return the answer.
   1575   // 2) Go to slow.
   1576   // 3) Fall through to both_loaded_as_doubles.
   1577   // 4) Jump to lhs_not_nan.
   1578   // In cases 3 and 4 we have found out we were dealing with a number-number
   1579   // comparison.  If VFP3 is supported the double values of the numbers have
   1580   // been loaded into d7 and d6.  Otherwise, the double values have been loaded
   1581   // into r0, r1, r2, and r3.
   1582   EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
   1583 
   1584   __ bind(&both_loaded_as_doubles);
   1585   // The arguments have been converted to doubles and stored in d6 and d7, if
   1586   // VFP3 is supported, or in r0, r1, r2, and r3.
   1587   Isolate* isolate = masm->isolate();
   1588   if (CpuFeatures::IsSupported(VFP3)) {
   1589     __ bind(&lhs_not_nan);
   1590     CpuFeatures::Scope scope(VFP3);
   1592     // ARMv7 VFP3 instructions to implement double precision comparison.
   1593     __ VFPCompareAndSetFlags(d7, d6);
   1594     Label nan;
   1595     __ b(vs, &nan);
   1596     __ mov(r0, Operand(EQUAL), LeaveCC, eq);
   1597     __ mov(r0, Operand(LESS), LeaveCC, lt);
   1598     __ mov(r0, Operand(GREATER), LeaveCC, gt);
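             // With the unordered (vs) case already dispatched to &nan above,
             // the three predicated moves are exhaustive: exactly one of them
             // writes r0.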
   1599     __ Ret();
   1600 
   1601     __ bind(&nan);
   1602     // If one of the sides was a NaN then the v flag is set.  Load r0 with
   1603     // whatever it takes to make the comparison fail, since comparisons with NaN
   1604     // always fail.
   1605     if (cc_ == lt || cc_ == le) {
   1606       __ mov(r0, Operand(GREATER));
   1607     } else {
   1608       __ mov(r0, Operand(LESS));
   1609     }
   1610     __ Ret();
   1611   } else {
   1612     // Checks for NaN in the doubles we have loaded.  Can return the answer or
   1613     // fall through if neither is a NaN.  Also binds lhs_not_nan.
   1614     EmitNanCheck(masm, &lhs_not_nan, cc_);
   1615     // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
   1616     // answer.  Never falls through.
   1617     EmitTwoNonNanDoubleComparison(masm, cc_);
   1618   }
   1619 
   1620   __ bind(&not_smis);
   1621   // At this point we know we are dealing with two different objects,
   1622   // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
   1623   if (strict_) {
   1624     // This returns non-equal for some object types, or falls through if it
   1625     // was not lucky.
   1626     EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
   1627   }
   1628 
   1629   Label check_for_symbols;
   1630   Label flat_string_check;
   1631   // Check for heap-number-heap-number comparison.  Can jump to slow case,
   1632   // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
   1633   // that case.  If the inputs are not doubles then jumps to check_for_symbols.
   1634   // In this case r2 will contain the type of rhs_.  Never falls through.
   1635   EmitCheckForTwoHeapNumbers(masm,
   1636                              lhs_,
   1637                              rhs_,
   1638                              &both_loaded_as_doubles,
   1639                              &check_for_symbols,
   1640                              &flat_string_check);
   1641 
   1642   __ bind(&check_for_symbols);
    1643   // In the strict case, EmitStrictTwoHeapObjectCompare already took care of
    1644   // symbols.
   1645   if (cc_ == eq && !strict_) {
   1646     // Returns an answer for two symbols or two detectable objects.
   1647     // Otherwise jumps to string case or not both strings case.
   1648     // Assumes that r2 is the type of rhs_ on entry.
   1649     EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
   1650   }
   1651 
   1652   // Check for both being sequential ASCII strings, and inline if that is the
   1653   // case.
   1654   __ bind(&flat_string_check);
   1655 
   1656   __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
   1657 
   1658   __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
   1659   StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
   1660                                                      lhs_,
   1661                                                      rhs_,
   1662                                                      r2,
   1663                                                      r3,
   1664                                                      r4,
   1665                                                      r5);
   1666   // Never falls through to here.
   1667 
   1668   __ bind(&slow);
   1669 
   1670   __ Push(lhs_, rhs_);
    1671   // Figure out which native to call and set up the arguments.
   1672   Builtins::JavaScript native;
   1673   if (cc_ == eq) {
   1674     native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   1675   } else {
   1676     native = Builtins::COMPARE;
   1677     int ncr;  // NaN compare result
   1678     if (cc_ == lt || cc_ == le) {
   1679       ncr = GREATER;
   1680     } else {
   1681       ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
   1682       ncr = LESS;
   1683     }
   1684     __ mov(r0, Operand(Smi::FromInt(ncr)));
   1685     __ push(r0);
   1686   }
   1687 
   1688   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   1689   // tagged as a small integer.
   1690   __ InvokeBuiltin(native, JUMP_JS);
   1691 }
   1692 
   1693 
   1694 // This stub does not handle the inlined cases (Smis, Booleans, undefined).
   1695 // The stub returns zero for false, and a non-zero value for true.
   1696 void ToBooleanStub::Generate(MacroAssembler* masm) {
   1697   // This stub uses VFP3 instructions.
   1698   ASSERT(CpuFeatures::IsEnabled(VFP3));
   1699 
   1700   Label false_result;
   1701   Label not_heap_number;
   1702   Register scratch = r9.is(tos_) ? r7 : r9;
   1703 
   1704   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   1705   __ cmp(tos_, ip);
   1706   __ b(eq, &false_result);
   1707 
   1708   // HeapNumber => false iff +0, -0, or NaN.
   1709   __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
   1710   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   1711   __ cmp(scratch, ip);
   1712   __ b(&not_heap_number, ne);
   1713 
   1714   __ sub(ip, tos_, Operand(kHeapObjectTag));
   1715   __ vldr(d1, ip, HeapNumber::kValueOffset);
   1716   __ VFPCompareAndSetFlags(d1, 0.0);
   1717   // "tos_" is a register, and contains a non zero value by default.
   1718   // Hence we only need to overwrite "tos_" with zero to return false for
   1719   // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
   1720   __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
   1721   __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
   1722   __ Ret();
   1723 
   1724   __ bind(&not_heap_number);
   1725 
   1726   // Check if the value is 'null'.
   1727   // 'null' => false.
   1728   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   1729   __ cmp(tos_, ip);
   1730   __ b(&false_result, eq);
   1731 
   1732   // It can be an undetectable object.
   1733   // Undetectable => false.
   1734   __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
   1735   __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
   1736   __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
   1737   __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
   1738   __ b(&false_result, eq);
   1739 
   1740   // JavaScript object => true.
   1741   __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
   1742   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1743   __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
   1744   // "tos_" is a register and contains a non-zero value.
   1745   // Hence we implicitly return true if the greater than
   1746   // condition is satisfied.
   1747   __ Ret(gt);
   1748 
   1749   // Check for string
   1750   __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
   1751   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1752   __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
   1753   // "tos_" is a register and contains a non-zero value.
   1754   // Hence we implicitly return true if the greater than
   1755   // condition is satisfied.
   1756   __ Ret(gt);
   1757 
   1758   // String value => false iff empty, i.e., length is zero
   1759   __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
   1760   // If length is zero, "tos_" contains zero ==> false.
   1761   // If length is not zero, "tos_" contains a non-zero value ==> true.
   1762   __ Ret();
   1763 
    1764   // Return 0 in "tos_" for false.
   1765   __ bind(&false_result);
   1766   __ mov(tos_, Operand(0, RelocInfo::NONE));
   1767   __ Ret();
   1768 }
   1769 
   1770 
   1771 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
   1772     TRBinaryOpIC::TypeInfo type_info,
   1773     TRBinaryOpIC::TypeInfo result_type_info) {
   1774   TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
   1775   return stub.GetCode();
   1776 }
   1777 
   1778 
   1779 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   1780   Label get_result;
   1781 
   1782   __ Push(r1, r0);
   1783 
   1784   __ mov(r2, Operand(Smi::FromInt(MinorKey())));
   1785   __ mov(r1, Operand(Smi::FromInt(op_)));
   1786   __ mov(r0, Operand(Smi::FromInt(operands_type_)));
   1787   __ Push(r2, r1, r0);
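           // Together with the two operands pushed above, five values are now
           // on the stack, matching the argument count passed to the IC patch
           // utility below.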
   1788 
   1789   __ TailCallExternalReference(
   1790       ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
   1791                         masm->isolate()),
   1792       5,
   1793       1);
   1794 }
   1795 
   1796 
   1797 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
   1798     MacroAssembler* masm) {
   1799   UNIMPLEMENTED();
   1800 }
   1801 
   1802 
   1803 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
   1804   switch (operands_type_) {
   1805     case TRBinaryOpIC::UNINITIALIZED:
   1806       GenerateTypeTransition(masm);
   1807       break;
   1808     case TRBinaryOpIC::SMI:
   1809       GenerateSmiStub(masm);
   1810       break;
   1811     case TRBinaryOpIC::INT32:
   1812       GenerateInt32Stub(masm);
   1813       break;
   1814     case TRBinaryOpIC::HEAP_NUMBER:
   1815       GenerateHeapNumberStub(masm);
   1816       break;
   1817     case TRBinaryOpIC::ODDBALL:
   1818       GenerateOddballStub(masm);
   1819       break;
   1820     case TRBinaryOpIC::STRING:
   1821       GenerateStringStub(masm);
   1822       break;
   1823     case TRBinaryOpIC::GENERIC:
   1824       GenerateGeneric(masm);
   1825       break;
   1826     default:
   1827       UNREACHABLE();
   1828   }
   1829 }
   1830 
   1831 
   1832 const char* TypeRecordingBinaryOpStub::GetName() {
   1833   if (name_ != NULL) return name_;
   1834   const int kMaxNameLength = 100;
   1835   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
   1836       kMaxNameLength);
   1837   if (name_ == NULL) return "OOM";
   1838   const char* op_name = Token::Name(op_);
   1839   const char* overwrite_name;
   1840   switch (mode_) {
   1841     case NO_OVERWRITE: overwrite_name = "Alloc"; break;
   1842     case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
   1843     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
   1844     default: overwrite_name = "UnknownOverwrite"; break;
   1845   }
   1846 
   1847   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
   1848                "TypeRecordingBinaryOpStub_%s_%s_%s",
   1849                op_name,
   1850                overwrite_name,
   1851                TRBinaryOpIC::GetName(operands_type_));
   1852   return name_;
   1853 }
   1854 
   1855 
   1856 void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
   1857     MacroAssembler* masm) {
   1858   Register left = r1;
   1859   Register right = r0;
   1860   Register scratch1 = r7;
   1861   Register scratch2 = r9;
   1862 
   1863   ASSERT(right.is(r0));
   1864   STATIC_ASSERT(kSmiTag == 0);
   1865 
   1866   Label not_smi_result;
   1867   switch (op_) {
   1868     case Token::ADD:
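               // Tagged smis add like plain integers because the tag bit is
               // zero: e.g. smi 3 (bits 6) plus smi 4 (bits 8) gives bits 14,
               // the tagged form of 7. Signed overflow, caught via the V
               // flag, is the only failure mode.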
   1869       __ add(right, left, Operand(right), SetCC);  // Add optimistically.
   1870       __ Ret(vc);
   1871       __ sub(right, right, Operand(left));  // Revert optimistic add.
   1872       break;
   1873     case Token::SUB:
   1874       __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
   1875       __ Ret(vc);
   1876       __ sub(right, left, Operand(right));  // Revert optimistic subtract.
   1877       break;
   1878     case Token::MUL:
   1879       // Remove tag from one of the operands. This way the multiplication result
   1880       // will be a smi if it fits the smi range.
   1881       __ SmiUntag(ip, right);
   1882       // Do multiplication
   1883       // scratch1 = lower 32 bits of ip * left.
   1884       // scratch2 = higher 32 bits of ip * left.
   1885       __ smull(scratch1, scratch2, left, ip);
   1886       // Check for overflowing the smi range - no overflow if higher 33 bits of
   1887       // the result are identical.
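               // Note that the product of a tagged and an untagged smi value
               // carries exactly one tag bit ((2a) * b == 2(a * b)), so
               // scratch1 already holds the correctly tagged result when it
               // fits.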
   1888       __ mov(ip, Operand(scratch1, ASR, 31));
   1889       __ cmp(ip, Operand(scratch2));
   1890       __ b(ne, &not_smi_result);
   1891       // Go slow on zero result to handle -0.
   1892       __ tst(scratch1, Operand(scratch1));
   1893       __ mov(right, Operand(scratch1), LeaveCC, ne);
   1894       __ Ret(ne);
   1895       // We need -0 if we were multiplying a negative number with 0 to get 0.
   1896       // We know one of them was zero.
   1897       __ add(scratch2, right, Operand(left), SetCC);
   1898       __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
   1899       __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
   1900       // We fall through here if we multiplied a negative number with 0, because
   1901       // that would mean we should produce -0.
   1902       break;
   1903     case Token::DIV:
   1904       // Check for power of two on the right hand side.
   1905       __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
   1906       // Check for positive and no remainder (scratch1 contains right - 1).
   1907       __ orr(scratch2, scratch1, Operand(0x80000000u));
   1908       __ tst(left, scratch2);
   1909       __ b(ne, &not_smi_result);
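               // Worked example, 12 / 4: left is tagged 24 and right is
               // tagged 8, so scratch1 holds 7. The tst above proves 24 is
               // non-negative with no remainder bits, and the shift count
               // below computes to 31 - clz(7) == 2, giving 24 LSR 2 == 6,
               // the tagged form of the quotient 3.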
   1910 
   1911       // Perform division by shifting.
   1912       __ CountLeadingZeros(scratch1, scratch1, scratch2);
   1913       __ rsb(scratch1, scratch1, Operand(31));
   1914       __ mov(right, Operand(left, LSR, scratch1));
   1915       __ Ret();
   1916       break;
   1917     case Token::MOD:
   1918       // Check for two positive smis.
   1919       __ orr(scratch1, left, Operand(right));
   1920       __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
   1921       __ b(ne, &not_smi_result);
   1922 
   1923       // Check for power of two on the right hand side.
   1924       __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
   1925 
   1926       // Perform modulus by masking.
   1927       __ and_(right, left, Operand(scratch1));
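               // E.g. 13 % 4: tagged 26 AND 7 == 2, the tagged form of 1.
               // The tag bit of left is zero, so it cannot leak into the
               // masked result.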
   1928       __ Ret();
   1929       break;
   1930     case Token::BIT_OR:
   1931       __ orr(right, left, Operand(right));
   1932       __ Ret();
   1933       break;
   1934     case Token::BIT_AND:
   1935       __ and_(right, left, Operand(right));
   1936       __ Ret();
   1937       break;
   1938     case Token::BIT_XOR:
   1939       __ eor(right, left, Operand(right));
   1940       __ Ret();
   1941       break;
   1942     case Token::SAR:
   1943       // Remove tags from right operand.
   1944       __ GetLeastBitsFromSmi(scratch1, right, 5);
   1945       __ mov(right, Operand(left, ASR, scratch1));
   1946       // Smi tag result.
   1947       __ bic(right, right, Operand(kSmiTagMask));
   1948       __ Ret();
   1949       break;
   1950     case Token::SHR:
    1951       // Remove tags from operands. We can't shift the tagged 31-bit value
    1952       // directly, because zeros would be shifted into bit 30 instead of bit 31.
   1953       __ SmiUntag(scratch1, left);
   1954       __ GetLeastBitsFromSmi(scratch2, right, 5);
   1955       __ mov(scratch1, Operand(scratch1, LSR, scratch2));
   1956       // Unsigned shift is not allowed to produce a negative number, so
   1957       // check the sign bit and the sign bit after Smi tagging.
   1958       __ tst(scratch1, Operand(0xc0000000));
   1959       __ b(ne, &not_smi_result);
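               // Both bit 31 and bit 30 of the shifted value must be clear:
               // bit 31 would make the untagged result read as negative, and
               // bit 30 would become the sign bit when SmiTag shifts left by
               // one.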
   1960       // Smi tag result.
   1961       __ SmiTag(right, scratch1);
   1962       __ Ret();
   1963       break;
   1964     case Token::SHL:
   1965       // Remove tags from operands.
   1966       __ SmiUntag(scratch1, left);
   1967       __ GetLeastBitsFromSmi(scratch2, right, 5);
   1968       __ mov(scratch1, Operand(scratch1, LSL, scratch2));
   1969       // Check that the signed result fits in a Smi.
   1970       __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
   1971       __ b(mi, &not_smi_result);
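               // Adding 0x40000000 sets the sign bit exactly when the value
               // lies outside [-2^30, 2^30), i.e. when it cannot carry a smi
               // tag.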
   1972       __ SmiTag(right, scratch1);
   1973       __ Ret();
   1974       break;
   1975     default:
   1976       UNREACHABLE();
   1977   }
   1978   __ bind(&not_smi_result);
   1979 }
   1980 
   1981 
   1982 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
   1983                                                     bool smi_operands,
   1984                                                     Label* not_numbers,
   1985                                                     Label* gc_required) {
   1986   Register left = r1;
   1987   Register right = r0;
   1988   Register scratch1 = r7;
   1989   Register scratch2 = r9;
   1990   Register scratch3 = r4;
   1991 
   1992   ASSERT(smi_operands || (not_numbers != NULL));
   1993   if (smi_operands && FLAG_debug_code) {
   1994     __ AbortIfNotSmi(left);
   1995     __ AbortIfNotSmi(right);
   1996   }
   1997 
   1998   Register heap_number_map = r6;
   1999   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   2000 
   2001   switch (op_) {
   2002     case Token::ADD:
   2003     case Token::SUB:
   2004     case Token::MUL:
   2005     case Token::DIV:
   2006     case Token::MOD: {
   2007       // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
   2008       // depending on whether VFP3 is available or not.
   2009       FloatingPointHelper::Destination destination =
   2010           CpuFeatures::IsSupported(VFP3) &&
   2011           op_ != Token::MOD ?
   2012           FloatingPointHelper::kVFPRegisters :
   2013           FloatingPointHelper::kCoreRegisters;
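               // MOD always takes the core-register path: it is performed via
               // a C function call below rather than a single VFP
               // instruction.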
   2014 
   2015       // Allocate new heap number for result.
   2016       Register result = r5;
   2017       GenerateHeapResultAllocation(
   2018           masm, result, heap_number_map, scratch1, scratch2, gc_required);
   2019 
   2020       // Load the operands.
   2021       if (smi_operands) {
   2022         FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
   2023       } else {
   2024         FloatingPointHelper::LoadOperands(masm,
   2025                                           destination,
   2026                                           heap_number_map,
   2027                                           scratch1,
   2028                                           scratch2,
   2029                                           not_numbers);
   2030       }
   2031 
   2032       // Calculate the result.
   2033       if (destination == FloatingPointHelper::kVFPRegisters) {
   2034         // Using VFP registers:
   2035         // d6: Left value
   2036         // d7: Right value
   2037         CpuFeatures::Scope scope(VFP3);
   2038         switch (op_) {
   2039           case Token::ADD:
   2040             __ vadd(d5, d6, d7);
   2041             break;
   2042           case Token::SUB:
   2043             __ vsub(d5, d6, d7);
   2044             break;
   2045           case Token::MUL:
   2046             __ vmul(d5, d6, d7);
   2047             break;
   2048           case Token::DIV:
   2049             __ vdiv(d5, d6, d7);
   2050             break;
   2051           default:
   2052             UNREACHABLE();
   2053         }
   2054 
   2055         __ sub(r0, result, Operand(kHeapObjectTag));
   2056         __ vstr(d5, r0, HeapNumber::kValueOffset);
   2057         __ add(r0, r0, Operand(kHeapObjectTag));
   2058         __ Ret();
   2059       } else {
   2060         // Call the C function to handle the double operation.
   2061         FloatingPointHelper::CallCCodeForDoubleOperation(masm,
   2062                                                          op_,
   2063                                                          result,
   2064                                                          scratch1);
   2065         if (FLAG_debug_code) {
   2066           __ stop("Unreachable code.");
   2067         }
   2068       }
   2069       break;
   2070     }
   2071     case Token::BIT_OR:
   2072     case Token::BIT_XOR:
   2073     case Token::BIT_AND:
   2074     case Token::SAR:
   2075     case Token::SHR:
   2076     case Token::SHL: {
   2077       if (smi_operands) {
   2078         __ SmiUntag(r3, left);
   2079         __ SmiUntag(r2, right);
   2080       } else {
   2081         // Convert operands to 32-bit integers. Right in r2 and left in r3.
   2082         FloatingPointHelper::ConvertNumberToInt32(masm,
   2083                                                   left,
   2084                                                   r3,
   2085                                                   heap_number_map,
   2086                                                   scratch1,
   2087                                                   scratch2,
   2088                                                   scratch3,
   2089                                                   d0,
   2090                                                   not_numbers);
   2091         FloatingPointHelper::ConvertNumberToInt32(masm,
   2092                                                   right,
   2093                                                   r2,
   2094                                                   heap_number_map,
   2095                                                   scratch1,
   2096                                                   scratch2,
   2097                                                   scratch3,
   2098                                                   d0,
   2099                                                   not_numbers);
   2100       }
   2101 
   2102       Label result_not_a_smi;
   2103       switch (op_) {
   2104         case Token::BIT_OR:
   2105           __ orr(r2, r3, Operand(r2));
   2106           break;
   2107         case Token::BIT_XOR:
   2108           __ eor(r2, r3, Operand(r2));
   2109           break;
   2110         case Token::BIT_AND:
   2111           __ and_(r2, r3, Operand(r2));
   2112           break;
   2113         case Token::SAR:
   2114           // Use only the 5 least significant bits of the shift count.
   2115           __ GetLeastBitsFromInt32(r2, r2, 5);
   2116           __ mov(r2, Operand(r3, ASR, r2));
   2117           break;
   2118         case Token::SHR:
   2119           // Use only the 5 least significant bits of the shift count.
   2120           __ GetLeastBitsFromInt32(r2, r2, 5);
   2121           __ mov(r2, Operand(r3, LSR, r2), SetCC);
    2122           // SHR is special because it is required to produce a positive answer.
    2123           // With VFP3 the unsigned result can still be written to a heap number
    2124           // (via vcvt_f64_u32 below), but the non-VFP3 heap number write cannot
    2125           // handle an unsigned int, so in that case we go to the slow case.
   2126           if (CpuFeatures::IsSupported(VFP3)) {
   2127             __ b(mi, &result_not_a_smi);
   2128           } else {
   2129             __ b(mi, not_numbers);
   2130           }
   2131           break;
   2132         case Token::SHL:
   2133           // Use only the 5 least significant bits of the shift count.
   2134           __ GetLeastBitsFromInt32(r2, r2, 5);
   2135           __ mov(r2, Operand(r3, LSL, r2));
   2136           break;
   2137         default:
   2138           UNREACHABLE();
   2139       }
   2140 
   2141       // Check that the *signed* result fits in a smi.
   2142       __ add(r3, r2, Operand(0x40000000), SetCC);
   2143       __ b(mi, &result_not_a_smi);
   2144       __ SmiTag(r0, r2);
   2145       __ Ret();
   2146 
   2147       // Allocate new heap number for result.
   2148       __ bind(&result_not_a_smi);
   2149       Register result = r5;
   2150       if (smi_operands) {
   2151         __ AllocateHeapNumber(
   2152             result, scratch1, scratch2, heap_number_map, gc_required);
   2153       } else {
   2154         GenerateHeapResultAllocation(
   2155             masm, result, heap_number_map, scratch1, scratch2, gc_required);
   2156       }
   2157 
   2158       // r2: Answer as signed int32.
   2159       // r5: Heap number to write answer into.
   2160 
   2161       // Nothing can go wrong now, so move the heap number to r0, which is the
   2162       // result.
   2163       __ mov(r0, Operand(r5));
   2164 
   2165       if (CpuFeatures::IsSupported(VFP3)) {
   2166         // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
   2167         // mentioned above SHR needs to always produce a positive result.
   2168         CpuFeatures::Scope scope(VFP3);
   2169         __ vmov(s0, r2);
   2170         if (op_ == Token::SHR) {
   2171           __ vcvt_f64_u32(d0, s0);
   2172         } else {
   2173           __ vcvt_f64_s32(d0, s0);
   2174         }
   2175         __ sub(r3, r0, Operand(kHeapObjectTag));
   2176         __ vstr(d0, r3, HeapNumber::kValueOffset);
   2177         __ Ret();
   2178       } else {
   2179         // Tail call that writes the int32 in r2 to the heap number in r0, using
   2180         // r3 as scratch. r0 is preserved and returned.
   2181         WriteInt32ToHeapNumberStub stub(r2, r0, r3);
   2182         __ TailCallStub(&stub);
   2183       }
   2184       break;
   2185     }
   2186     default:
   2187       UNREACHABLE();
   2188   }
   2189 }
   2190 
   2191 
    2192 // Generate the smi code. If the operation on smis is successful, the result is
    2193 // returned. If the result is not a smi and heap number allocation is not
    2194 // requested, the code falls through. If heap number allocation is requested but
    2195 // a heap number cannot be allocated, the code jumps to the label gc_required.
   2196 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
   2197     Label* use_runtime,
   2198     Label* gc_required,
   2199     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
   2200   Label not_smis;
   2201 
   2202   Register left = r1;
   2203   Register right = r0;
   2204   Register scratch1 = r7;
   2205   Register scratch2 = r9;
   2206 
   2207   // Perform combined smi check on both operands.
   2208   __ orr(scratch1, left, Operand(right));
   2209   STATIC_ASSERT(kSmiTag == 0);
   2210   __ tst(scratch1, Operand(kSmiTagMask));
   2211   __ b(ne, &not_smis);
   2212 
    2213   // If the smi-smi operation results in a smi, the result is returned.
   2214   GenerateSmiSmiOperation(masm);
   2215 
    2216   // If heap number results are possible, generate the result in an allocated
    2217   // heap number.
   2218   if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
   2219     GenerateFPOperation(masm, true, use_runtime, gc_required);
   2220   }
   2221   __ bind(&not_smis);
   2222 }
   2223 
   2224 
   2225 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   2226   Label not_smis, call_runtime;
   2227 
   2228   if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
   2229       result_type_ == TRBinaryOpIC::SMI) {
   2230     // Only allow smi results.
   2231     GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
   2232   } else {
   2233     // Allow heap number result and don't make a transition if a heap number
   2234     // cannot be allocated.
   2235     GenerateSmiCode(masm,
   2236                     &call_runtime,
   2237                     &call_runtime,
   2238                     ALLOW_HEAPNUMBER_RESULTS);
   2239   }
   2240 
   2241   // Code falls through if the result is not returned as either a smi or heap
   2242   // number.
   2243   GenerateTypeTransition(masm);
   2244 
   2245   __ bind(&call_runtime);
   2246   GenerateCallRuntime(masm);
   2247 }
   2248 
   2249 
   2250 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
   2251   ASSERT(operands_type_ == TRBinaryOpIC::STRING);
   2252   ASSERT(op_ == Token::ADD);
   2253   // Try to add arguments as strings, otherwise, transition to the generic
   2254   // TRBinaryOpIC type.
   2255   GenerateAddStrings(masm);
   2256   GenerateTypeTransition(masm);
   2257 }
   2258 
   2259 
   2260 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   2261   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
   2262 
   2263   Register left = r1;
   2264   Register right = r0;
   2265   Register scratch1 = r7;
   2266   Register scratch2 = r9;
   2267   DwVfpRegister double_scratch = d0;
   2268   SwVfpRegister single_scratch = s3;
   2269 
   2270   Register heap_number_result = no_reg;
   2271   Register heap_number_map = r6;
   2272   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   2273 
   2274   Label call_runtime;
    2275   // Label for type transitions, used for wrong input or output types.
    2276   // All of the different causes of a type transition currently branch to
    2277   // this same label.
   2278   Label transition;
   2279 
   2280   // Smi-smi fast case.
   2281   Label skip;
   2282   __ orr(scratch1, left, right);
   2283   __ JumpIfNotSmi(scratch1, &skip);
   2284   GenerateSmiSmiOperation(masm);
   2285   // Fall through if the result is not a smi.
   2286   __ bind(&skip);
   2287 
   2288   switch (op_) {
   2289     case Token::ADD:
   2290     case Token::SUB:
   2291     case Token::MUL:
   2292     case Token::DIV:
   2293     case Token::MOD: {
    2294       // Load both operands and check that they are 32-bit integers.
    2295       // Jump to type transition if they are not. The registers r0 and r1 (right
    2296       // and left) are preserved for the runtime call.
    2297       FloatingPointHelper::Destination destination =
    2298           CpuFeatures::IsSupported(VFP3) &&
    2299           op_ != Token::MOD ?
    2300           FloatingPointHelper::kVFPRegisters :
    2301           FloatingPointHelper::kCoreRegisters;
    2302 
    2303       FloatingPointHelper::LoadNumberAsInt32Double(masm,
    2304                                                    right,
    2305                                                    destination,
    2306                                                    d7,
    2307                                                    r2,
    2308                                                    r3,
    2309                                                    heap_number_map,
    2310                                                    scratch1,
    2311                                                    scratch2,
    2312                                                    s0,
    2313                                                    &transition);
    2314       FloatingPointHelper::LoadNumberAsInt32Double(masm,
    2315                                                    left,
    2316                                                    destination,
    2317                                                    d6,
    2318                                                    r4,
    2319                                                    r5,
    2320                                                    heap_number_map,
    2321                                                    scratch1,
    2322                                                    scratch2,
    2323                                                    s0,
    2324                                                    &transition);
   2325 
   2326       if (destination == FloatingPointHelper::kVFPRegisters) {
   2327         CpuFeatures::Scope scope(VFP3);
   2328         Label return_heap_number;
   2329         switch (op_) {
   2330           case Token::ADD:
   2331             __ vadd(d5, d6, d7);
   2332             break;
   2333           case Token::SUB:
   2334             __ vsub(d5, d6, d7);
   2335             break;
   2336           case Token::MUL:
   2337             __ vmul(d5, d6, d7);
   2338             break;
   2339           case Token::DIV:
   2340             __ vdiv(d5, d6, d7);
   2341             break;
   2342           default:
   2343             UNREACHABLE();
   2344         }
   2345 
   2346         if (op_ != Token::DIV) {
   2347           // These operations produce an integer result.
   2348           // Try to return a smi if we can.
   2349           // Otherwise return a heap number if allowed, or jump to type
   2350           // transition.
   2351 
   2352           __ EmitVFPTruncate(kRoundToZero,
   2353                              single_scratch,
   2354                              d5,
   2355                              scratch1,
   2356                              scratch2);
   2357 
   2358           if (result_type_ <= TRBinaryOpIC::INT32) {
   2359             // If the ne condition is set, result does
   2360             // not fit in a 32-bit integer.
   2361             __ b(ne, &transition);
   2362           }
   2363 
   2364           // Check if the result fits in a smi.
   2365           __ vmov(scratch1, single_scratch);
   2366           __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
   2367           // If not try to return a heap number.
   2368           __ b(mi, &return_heap_number);
   2369           // Check for minus zero. Return heap number for minus zero.
   2370           Label not_zero;
   2371           __ cmp(scratch1, Operand(0));
   2372           __ b(ne, &not_zero);
   2373           __ vmov(scratch2, d5.high());
   2374           __ tst(scratch2, Operand(HeapNumber::kSignMask));
   2375           __ b(ne, &return_heap_number);
   2376           __ bind(&not_zero);
   2377 
   2378           // Tag the result and return.
   2379           __ SmiTag(r0, scratch1);
   2380           __ Ret();
   2381         } else {
   2382           // DIV just falls through to allocating a heap number.
   2383         }
   2384 
    2385         if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
    2386                                                   : TRBinaryOpIC::INT32)) {
   2387           __ bind(&return_heap_number);
   2388           // We are using vfp registers so r5 is available.
   2389           heap_number_result = r5;
   2390           GenerateHeapResultAllocation(masm,
   2391                                        heap_number_result,
   2392                                        heap_number_map,
   2393                                        scratch1,
   2394                                        scratch2,
   2395                                        &call_runtime);
   2396           __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
   2397           __ vstr(d5, r0, HeapNumber::kValueOffset);
   2398           __ mov(r0, heap_number_result);
   2399           __ Ret();
   2400         }
   2401 
   2402         // A DIV operation expecting an integer result falls through
   2403         // to type transition.
   2404 
   2405       } else {
   2406         // We preserved r0 and r1 to be able to call runtime.
   2407         // Save the left value on the stack.
   2408         __ Push(r5, r4);
   2409 
   2410         Label pop_and_call_runtime;
   2411 
   2412         // Allocate a heap number to store the result.
   2413         heap_number_result = r5;
   2414         GenerateHeapResultAllocation(masm,
   2415                                      heap_number_result,
   2416                                      heap_number_map,
   2417                                      scratch1,
   2418                                      scratch2,
   2419                                      &pop_and_call_runtime);
   2420 
   2421         // Load the left value from the value saved on the stack.
   2422         __ Pop(r1, r0);
   2423 
   2424         // Call the C function to handle the double operation.
   2425         FloatingPointHelper::CallCCodeForDoubleOperation(
   2426             masm, op_, heap_number_result, scratch1);
   2427         if (FLAG_debug_code) {
   2428           __ stop("Unreachable code.");
   2429         }
   2430 
   2431         __ bind(&pop_and_call_runtime);
   2432         __ Drop(2);
   2433         __ b(&call_runtime);
   2434       }
   2435 
   2436       break;
   2437     }
   2438 
   2439     case Token::BIT_OR:
   2440     case Token::BIT_XOR:
   2441     case Token::BIT_AND:
   2442     case Token::SAR:
   2443     case Token::SHR:
   2444     case Token::SHL: {
   2445       Label return_heap_number;
   2446       Register scratch3 = r5;
   2447       // Convert operands to 32-bit integers. Right in r2 and left in r3. The
   2448       // registers r0 and r1 (right and left) are preserved for the runtime
   2449       // call.
   2450       FloatingPointHelper::LoadNumberAsInt32(masm,
   2451                                              left,
   2452                                              r3,
   2453                                              heap_number_map,
   2454                                              scratch1,
   2455                                              scratch2,
   2456                                              scratch3,
   2457                                              d0,
   2458                                              &transition);
   2459       FloatingPointHelper::LoadNumberAsInt32(masm,
   2460                                              right,
   2461                                              r2,
   2462                                              heap_number_map,
   2463                                              scratch1,
   2464                                              scratch2,
   2465                                              scratch3,
   2466                                              d0,
   2467                                              &transition);
   2468 
   2469       // The ECMA-262 standard specifies that, for shift operations, only the
   2470       // 5 least significant bits of the shift value should be used.
   2471       switch (op_) {
   2472         case Token::BIT_OR:
   2473           __ orr(r2, r3, Operand(r2));
   2474           break;
   2475         case Token::BIT_XOR:
   2476           __ eor(r2, r3, Operand(r2));
   2477           break;
   2478         case Token::BIT_AND:
   2479           __ and_(r2, r3, Operand(r2));
   2480           break;
   2481         case Token::SAR:
   2482           __ and_(r2, r2, Operand(0x1f));
   2483           __ mov(r2, Operand(r3, ASR, r2));
   2484           break;
   2485         case Token::SHR:
   2486           __ and_(r2, r2, Operand(0x1f));
   2487           __ mov(r2, Operand(r3, LSR, r2), SetCC);
   2488           // SHR is special because it is required to produce a positive answer.
   2489           // We only get a negative result if the shift value (r2) is 0.
    2490           // Such a result cannot be represented as a signed 32-bit integer, so
    2491           // we try to return a heap number if we can.
    2492           // The non-VFP3 code does not support this special case, so jump to
    2493           // the runtime if VFP3 is unavailable.
   2494           if (CpuFeatures::IsSupported(VFP3)) {
   2495             __ b(mi,
   2496                  (result_type_ <= TRBinaryOpIC::INT32) ? &transition
   2497                                                        : &return_heap_number);
   2498           } else {
   2499             __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
   2500                                                            : &call_runtime);
   2501           }
   2502           break;
   2503         case Token::SHL:
   2504           __ and_(r2, r2, Operand(0x1f));
   2505           __ mov(r2, Operand(r3, LSL, r2));
   2506           break;
   2507         default:
   2508           UNREACHABLE();
   2509       }
   2510 
   2511       // Check if the result fits in a smi.
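      // Adding 0x40000000 sets the N flag exactly when r2 lies outside the
      // signed 31-bit smi range [-0x40000000, 0x3fffffff].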
   2512       __ add(scratch1, r2, Operand(0x40000000), SetCC);
      // If not, try to return a heap number. (We know the result is an int32.)
   2514       __ b(mi, &return_heap_number);
   2515       // Tag the result and return.
   2516       __ SmiTag(r0, r2);
   2517       __ Ret();
   2518 
   2519       __ bind(&return_heap_number);
   2520       heap_number_result = r5;
   2521       GenerateHeapResultAllocation(masm,
   2522                                    heap_number_result,
   2523                                    heap_number_map,
   2524                                    scratch1,
   2525                                    scratch2,
   2526                                    &call_runtime);
   2527 
   2528       if (CpuFeatures::IsSupported(VFP3)) {
   2529         CpuFeatures::Scope scope(VFP3);
   2530         if (op_ != Token::SHR) {
   2531           // Convert the result to a floating point value.
   2532           __ vmov(double_scratch.low(), r2);
   2533           __ vcvt_f64_s32(double_scratch, double_scratch.low());
   2534         } else {
   2535           // The result must be interpreted as an unsigned 32-bit integer.
   2536           __ vmov(double_scratch.low(), r2);
   2537           __ vcvt_f64_u32(double_scratch, double_scratch.low());
   2538         }
   2539 
   2540         // Store the result.
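        // vstr takes an untagged address, so kHeapObjectTag is subtracted
        // from the tagged heap number pointer first.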
   2541         __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
   2542         __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
   2543         __ mov(r0, heap_number_result);
   2544         __ Ret();
   2545       } else {
   2546         // Tail call that writes the int32 in r2 to the heap number in r0, using
   2547         // r3 as scratch. r0 is preserved and returned.
   2548         __ mov(r0, r5);
   2549         WriteInt32ToHeapNumberStub stub(r2, r0, r3);
   2550         __ TailCallStub(&stub);
   2551       }
   2552 
   2553       break;
   2554     }
   2555 
   2556     default:
   2557       UNREACHABLE();
   2558   }
   2559 
   2560   if (transition.is_linked()) {
   2561     __ bind(&transition);
   2562     GenerateTypeTransition(masm);
   2563   }
   2564 
   2565   __ bind(&call_runtime);
   2566   GenerateCallRuntime(masm);
   2567 }
   2568 
   2569 
   2570 void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   2571   Label call_runtime;
   2572 
   2573   if (op_ == Token::ADD) {
   2574     // Handle string addition here, because it is the only operation
   2575     // that does not do a ToNumber conversion on the operands.
   2576     GenerateAddStrings(masm);
   2577   }
   2578 
   2579   // Convert oddball arguments to numbers.
   2580   Label check, done;
   2581   __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
   2582   __ b(ne, &check);
   2583   if (Token::IsBitOp(op_)) {
   2584     __ mov(r1, Operand(Smi::FromInt(0)));
   2585   } else {
   2586     __ LoadRoot(r1, Heap::kNanValueRootIndex);
   2587   }
   2588   __ jmp(&done);
   2589   __ bind(&check);
   2590   __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
   2591   __ b(ne, &done);
   2592   if (Token::IsBitOp(op_)) {
   2593     __ mov(r0, Operand(Smi::FromInt(0)));
   2594   } else {
   2595     __ LoadRoot(r0, Heap::kNanValueRootIndex);
   2596   }
   2597   __ bind(&done);
   2598 
   2599   GenerateHeapNumberStub(masm);
   2600 }
   2601 
   2602 
   2603 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   2604   Label call_runtime;
   2605   GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
   2606 
   2607   __ bind(&call_runtime);
   2608   GenerateCallRuntime(masm);
   2609 }
   2610 
   2611 
   2612 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   2613   Label call_runtime, call_string_add_or_runtime;
   2614 
   2615   GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
   2616 
   2617   GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
   2618 
   2619   __ bind(&call_string_add_or_runtime);
   2620   if (op_ == Token::ADD) {
   2621     GenerateAddStrings(masm);
   2622   }
   2623 
   2624   __ bind(&call_runtime);
   2625   GenerateCallRuntime(masm);
   2626 }
   2627 
   2628 
   2629 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   2630   ASSERT(op_ == Token::ADD);
   2631   Label left_not_string, call_runtime;
   2632 
   2633   Register left = r1;
   2634   Register right = r0;
   2635 
   2636   // Check if left argument is a string.
   2637   __ JumpIfSmi(left, &left_not_string);
   2638   __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
   2639   __ b(ge, &left_not_string);
   2640 
   2641   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
   2642   GenerateRegisterArgsPush(masm);
   2643   __ TailCallStub(&string_add_left_stub);
   2644 
   2645   // Left operand is not a string, test right.
   2646   __ bind(&left_not_string);
   2647   __ JumpIfSmi(right, &call_runtime);
   2648   __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
   2649   __ b(ge, &call_runtime);
   2650 
   2651   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
   2652   GenerateRegisterArgsPush(masm);
   2653   __ TailCallStub(&string_add_right_stub);
   2654 
   2655   // At least one argument is not a string.
   2656   __ bind(&call_runtime);
   2657 }
   2658 
   2659 
   2660 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
   2661   GenerateRegisterArgsPush(masm);
   2662   switch (op_) {
   2663     case Token::ADD:
   2664       __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
   2665       break;
   2666     case Token::SUB:
   2667       __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
   2668       break;
   2669     case Token::MUL:
   2670       __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
   2671       break;
   2672     case Token::DIV:
   2673       __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
   2674       break;
   2675     case Token::MOD:
   2676       __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
   2677       break;
   2678     case Token::BIT_OR:
   2679       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
   2680       break;
   2681     case Token::BIT_AND:
   2682       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
   2683       break;
   2684     case Token::BIT_XOR:
   2685       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
   2686       break;
   2687     case Token::SAR:
   2688       __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
   2689       break;
   2690     case Token::SHR:
   2691       __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
   2692       break;
   2693     case Token::SHL:
   2694       __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
   2695       break;
   2696     default:
   2697       UNREACHABLE();
   2698   }
   2699 }
   2700 
   2701 
   2702 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
   2703     MacroAssembler* masm,
   2704     Register result,
   2705     Register heap_number_map,
   2706     Register scratch1,
   2707     Register scratch2,
   2708     Label* gc_required) {
   2709 
  // The code below will scratch the result register if allocation fails. To
  // keep both arguments intact for the runtime call, the result cannot be
  // one of them.
   2712   ASSERT(!result.is(r0) && !result.is(r1));
   2713 
   2714   if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
   2715     Label skip_allocation, allocated;
   2716     Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
   2717     // If the overwritable operand is already an object, we skip the
   2718     // allocation of a heap number.
   2719     __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
   2720     // Allocate a heap number for the result.
   2721     __ AllocateHeapNumber(
   2722         result, scratch1, scratch2, heap_number_map, gc_required);
   2723     __ b(&allocated);
   2724     __ bind(&skip_allocation);
   2725     // Use object holding the overwritable operand for result.
   2726     __ mov(result, Operand(overwritable_operand));
   2727     __ bind(&allocated);
   2728   } else {
   2729     ASSERT(mode_ == NO_OVERWRITE);
   2730     __ AllocateHeapNumber(
   2731         result, scratch1, scratch2, heap_number_map, gc_required);
   2732   }
   2733 }
   2734 
   2735 
   2736 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   2737   __ Push(r1, r0);
   2738 }
   2739 
   2740 
   2741 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   2742   // Untagged case: double input in d2, double result goes
   2743   //   into d2.
   2744   // Tagged case: tagged input on top of stack and in r0,
   2745   //   tagged result (heap number) goes into r0.
   2746 
   2747   Label input_not_smi;
   2748   Label loaded;
   2749   Label calculate;
   2750   Label invalid_cache;
   2751   const Register scratch0 = r9;
   2752   const Register scratch1 = r7;
   2753   const Register cache_entry = r0;
   2754   const bool tagged = (argument_type_ == TAGGED);
   2755 
   2756   if (CpuFeatures::IsSupported(VFP3)) {
   2757     CpuFeatures::Scope scope(VFP3);
   2758     if (tagged) {
      // Argument is a number and is on the stack and in r0.
   2760       // Load argument and check if it is a smi.
   2761       __ JumpIfNotSmi(r0, &input_not_smi);
   2762 
   2763       // Input is a smi. Convert to double and load the low and high words
   2764       // of the double into r2, r3.
   2765       __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
   2766       __ b(&loaded);
   2767 
   2768       __ bind(&input_not_smi);
   2769       // Check if input is a HeapNumber.
   2770       __ CheckMap(r0,
   2771                   r1,
   2772                   Heap::kHeapNumberMapRootIndex,
   2773                   &calculate,
   2774                   true);
   2775       // Input is a HeapNumber. Load it to a double register and store the
   2776       // low and high words into r2, r3.
   2777       __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
   2778       __ vmov(r2, r3, d0);
   2779     } else {
   2780       // Input is untagged double in d2. Output goes to d2.
   2781       __ vmov(r2, r3, d2);
   2782     }
   2783     __ bind(&loaded);
   2784     // r2 = low 32 bits of double value
   2785     // r3 = high 32 bits of double value
   2786     // Compute hash (the shifts are arithmetic):
   2787     //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
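    // The shifts fold the high bits of the XORed words down into the low
    // bits, so both words of the double contribute to the final index.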
   2788     __ eor(r1, r2, Operand(r3));
   2789     __ eor(r1, r1, Operand(r1, ASR, 16));
   2790     __ eor(r1, r1, Operand(r1, ASR, 8));
   2791     ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
   2792     __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
   2793 
   2794     // r2 = low 32 bits of double value.
   2795     // r3 = high 32 bits of double value.
   2796     // r1 = TranscendentalCache::hash(double value).
   2797     Isolate* isolate = masm->isolate();
   2798     ExternalReference cache_array =
   2799         ExternalReference::transcendental_cache_array_address(isolate);
   2800     __ mov(cache_entry, Operand(cache_array));
   2801     // cache_entry points to cache array.
   2802     int cache_array_index
   2803         = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
   2804     __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
   2805     // r0 points to the cache for the type type_.
   2806     // If NULL, the cache hasn't been initialized yet, so go through runtime.
   2807     __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
   2808     __ b(eq, &invalid_cache);
   2809 
   2810 #ifdef DEBUG
    // Check that the layout of cache elements matches expectations.
   2812     { TranscendentalCache::SubCache::Element test_elem[2];
   2813       char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
   2814       char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
   2815       char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
   2816       char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
   2817       char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
   2819       CHECK_EQ(0, elem_in0 - elem_start);
   2820       CHECK_EQ(kIntSize, elem_in1 - elem_start);
   2821       CHECK_EQ(2 * kIntSize, elem_out - elem_start);
   2822     }
   2823 #endif
   2824 
   2825     // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
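    // The multiply by 12 is done as r1 = r1 * 3 followed by adding r1
    // scaled by 4.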
   2826     __ add(r1, r1, Operand(r1, LSL, 1));
   2827     __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
   2828     // Check if cache matches: Double value is stored in uint32_t[2] array.
   2829     __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
   2830     __ cmp(r2, r4);
   2831     __ b(ne, &calculate);
   2832     __ cmp(r3, r5);
   2833     __ b(ne, &calculate);
   2834     // Cache hit. Load result, cleanup and return.
   2835     if (tagged) {
   2836       // Pop input value from stack and load result into r0.
   2837       __ pop();
   2838       __ mov(r0, Operand(r6));
   2839     } else {
   2840       // Load result into d2.
      __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
   2842     }
   2843     __ Ret();
   2844   }  // if (CpuFeatures::IsSupported(VFP3))
   2845 
   2846   __ bind(&calculate);
   2847   if (tagged) {
   2848     __ bind(&invalid_cache);
   2849     ExternalReference runtime_function =
   2850         ExternalReference(RuntimeFunction(), masm->isolate());
   2851     __ TailCallExternalReference(runtime_function, 1, 1);
   2852   } else {
   2853     if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
   2854     CpuFeatures::Scope scope(VFP3);
   2855 
   2856     Label no_update;
   2857     Label skip_cache;
   2858     const Register heap_number_map = r5;
   2859 
   2860     // Call C function to calculate the result and update the cache.
   2861     // Register r0 holds precalculated cache entry address; preserve
   2862     // it on the stack and pop it into register cache_entry after the
   2863     // call.
   2864     __ push(cache_entry);
   2865     GenerateCallCFunction(masm, scratch0);
   2866     __ GetCFunctionDoubleResult(d2);
   2867 
   2868     // Try to update the cache. If we cannot allocate a
   2869     // heap number, we return the result without updating.
   2870     __ pop(cache_entry);
   2871     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
   2872     __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
   2873     __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
   2874     __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
   2875     __ Ret();
   2876 
   2877     __ bind(&invalid_cache);
   2878     // The cache is invalid. Call runtime which will recreate the
   2879     // cache.
   2880     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
   2881     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
   2882     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
   2883     __ EnterInternalFrame();
   2884     __ push(r0);
   2885     __ CallRuntime(RuntimeFunction(), 1);
   2886     __ LeaveInternalFrame();
   2887     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
   2888     __ Ret();
   2889 
   2890     __ bind(&skip_cache);
    // Call C function to calculate the result and return the answer
    // directly without updating the cache.
   2893     GenerateCallCFunction(masm, scratch0);
   2894     __ GetCFunctionDoubleResult(d2);
   2895     __ bind(&no_update);
   2896 
   2897     // We return the value in d2 without adding it to the cache, but
   2898     // we cause a scavenging GC so that future allocations will succeed.
   2899     __ EnterInternalFrame();
   2900 
   2901     // Allocate an aligned object larger than a HeapNumber.
   2902     ASSERT(4 * kPointerSize >= HeapNumber::kSize);
   2903     __ mov(scratch0, Operand(4 * kPointerSize));
   2904     __ push(scratch0);
   2905     __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
   2906     __ LeaveInternalFrame();
   2907     __ Ret();
   2908   }
   2909 }
   2910 
   2911 
   2912 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
   2913                                                     Register scratch) {
   2914   Isolate* isolate = masm->isolate();
   2915 
   2916   __ push(lr);
   2917   __ PrepareCallCFunction(2, scratch);
   2918   __ vmov(r0, r1, d2);
   2919   switch (type_) {
   2920     case TranscendentalCache::SIN:
   2921       __ CallCFunction(ExternalReference::math_sin_double_function(isolate), 2);
   2922       break;
   2923     case TranscendentalCache::COS:
   2924       __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 2);
   2925       break;
   2926     case TranscendentalCache::LOG:
   2927       __ CallCFunction(ExternalReference::math_log_double_function(isolate), 2);
   2928       break;
   2929     default:
   2930       UNIMPLEMENTED();
   2931       break;
   2932   }
   2933   __ pop(lr);
   2934 }
   2935 
   2936 
   2937 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   2938   switch (type_) {
   2939     // Add more cases when necessary.
   2940     case TranscendentalCache::SIN: return Runtime::kMath_sin;
   2941     case TranscendentalCache::COS: return Runtime::kMath_cos;
   2942     case TranscendentalCache::LOG: return Runtime::kMath_log;
   2943     default:
   2944       UNIMPLEMENTED();
   2945       return Runtime::kAbort;
   2946   }
   2947 }
   2948 
   2949 
   2950 void StackCheckStub::Generate(MacroAssembler* masm) {
   2951   __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
   2952 }
   2953 
   2954 
   2955 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   2956   Label slow, done;
   2957 
   2958   Register heap_number_map = r6;
   2959   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   2960 
   2961   if (op_ == Token::SUB) {
   2962     if (include_smi_code_) {
   2963       // Check whether the value is a smi.
   2964       Label try_float;
   2965       __ tst(r0, Operand(kSmiTagMask));
   2966       __ b(ne, &try_float);
   2967 
      // Go to the slow case if the value of the expression is zero, to make
      // sure that negating 0 correctly produces -0.
   2970       if (negative_zero_ == kStrictNegativeZero) {
   2971         // If we have to check for zero, then we can check for the max negative
   2972         // smi while we are at it.
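        // Clearing the sign bit leaves zero exactly when r0 is 0 (smi zero)
        // or 0x80000000 (the most negative smi); both go to the slow case.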
   2973         __ bic(ip, r0, Operand(0x80000000), SetCC);
   2974         __ b(eq, &slow);
   2975         __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
   2976         __ Ret();
   2977       } else {
   2978         // The value of the expression is a smi and 0 is OK for -0.  Try
   2979         // optimistic subtraction '0 - value'.
   2980         __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
   2981         __ Ret(vc);
   2982         // We don't have to reverse the optimistic neg since the only case
   2983         // where we fall through is the minimum negative Smi, which is the case
   2984         // where the neg leaves the register unchanged.
   2985         __ jmp(&slow);  // Go slow on max negative Smi.
   2986       }
   2987       __ bind(&try_float);
   2988     } else if (FLAG_debug_code) {
   2989       __ tst(r0, Operand(kSmiTagMask));
   2990       __ Assert(ne, "Unexpected smi operand.");
   2991     }
   2992 
   2993     __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   2994     __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   2995     __ cmp(r1, heap_number_map);
   2996     __ b(ne, &slow);
   2997     // r0 is a heap number.  Get a new heap number in r1.
   2998     if (overwrite_ == UNARY_OVERWRITE) {
   2999       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
   3000       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
   3001       __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
   3002     } else {
   3003       __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
   3004       __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
   3005       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
   3006       __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
   3007       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
   3008       __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
   3009       __ mov(r0, Operand(r1));
   3010     }
   3011   } else if (op_ == Token::BIT_NOT) {
   3012     if (include_smi_code_) {
   3013       Label non_smi;
   3014       __ JumpIfNotSmi(r0, &non_smi);
   3015       __ mvn(r0, Operand(r0));
   3016       // Bit-clear inverted smi-tag.
   3017       __ bic(r0, r0, Operand(kSmiTagMask));
   3018       __ Ret();
   3019       __ bind(&non_smi);
   3020     } else if (FLAG_debug_code) {
   3021       __ tst(r0, Operand(kSmiTagMask));
   3022       __ Assert(ne, "Unexpected smi operand.");
   3023     }
   3024 
   3025     // Check if the operand is a heap number.
   3026     __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   3027     __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   3028     __ cmp(r1, heap_number_map);
   3029     __ b(ne, &slow);
   3030 
    // Convert the heap number in r0 to an untagged integer in r1.
   3032     __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
   3033 
   3034     // Do the bitwise operation (move negated) and check if the result
   3035     // fits in a smi.
   3036     Label try_float;
   3037     __ mvn(r1, Operand(r1));
   3038     __ add(r2, r1, Operand(0x40000000), SetCC);
   3039     __ b(mi, &try_float);
   3040     __ mov(r0, Operand(r1, LSL, kSmiTagSize));
   3041     __ b(&done);
   3042 
   3043     __ bind(&try_float);
    if (overwrite_ != UNARY_OVERWRITE) {
   3045       // Allocate a fresh heap number, but don't overwrite r0 until
   3046       // we're sure we can do it without going through the slow case
   3047       // that needs the value in r0.
   3048       __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
   3049       __ mov(r0, Operand(r2));
   3050     }
   3051 
   3052     if (CpuFeatures::IsSupported(VFP3)) {
   3053       // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
   3054       CpuFeatures::Scope scope(VFP3);
   3055       __ vmov(s0, r1);
   3056       __ vcvt_f64_s32(d0, s0);
   3057       __ sub(r2, r0, Operand(kHeapObjectTag));
   3058       __ vstr(d0, r2, HeapNumber::kValueOffset);
   3059     } else {
   3060       // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
   3061       // have to set up a frame.
   3062       WriteInt32ToHeapNumberStub stub(r1, r0, r2);
   3063       __ push(lr);
   3064       __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   3065       __ pop(lr);
   3066     }
   3067   } else {
   3068     UNIMPLEMENTED();
   3069   }
   3070 
   3071   __ bind(&done);
   3072   __ Ret();
   3073 
   3074   // Handle the slow case by jumping to the JavaScript builtin.
   3075   __ bind(&slow);
   3076   __ push(r0);
   3077   switch (op_) {
   3078     case Token::SUB:
   3079       __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
   3080       break;
   3081     case Token::BIT_NOT:
   3082       __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
   3083       break;
   3084     default:
   3085       UNREACHABLE();
   3086   }
   3087 }
   3088 
   3089 
   3090 void MathPowStub::Generate(MacroAssembler* masm) {
   3091   Label call_runtime;
   3092 
   3093   if (CpuFeatures::IsSupported(VFP3)) {
   3094     CpuFeatures::Scope scope(VFP3);
   3095 
   3096     Label base_not_smi;
   3097     Label exponent_not_smi;
   3098     Label convert_exponent;
   3099 
   3100     const Register base = r0;
   3101     const Register exponent = r1;
   3102     const Register heapnumbermap = r5;
   3103     const Register heapnumber = r6;
   3104     const DoubleRegister double_base = d0;
   3105     const DoubleRegister double_exponent = d1;
   3106     const DoubleRegister double_result = d2;
   3107     const SwVfpRegister single_scratch = s0;
   3108     const Register scratch = r9;
   3109     const Register scratch2 = r7;
   3110 
   3111     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
   3112     __ ldr(base, MemOperand(sp, 1 * kPointerSize));
   3113     __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
   3114 
   3115     // Convert base to double value and store it in d0.
   3116     __ JumpIfNotSmi(base, &base_not_smi);
   3117     // Base is a Smi. Untag and convert it.
   3118     __ SmiUntag(base);
   3119     __ vmov(single_scratch, base);
   3120     __ vcvt_f64_s32(double_base, single_scratch);
   3121     __ b(&convert_exponent);
   3122 
   3123     __ bind(&base_not_smi);
   3124     __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
   3125     __ cmp(scratch, heapnumbermap);
   3126     __ b(ne, &call_runtime);
   3127     // Base is a heapnumber. Load it into double register.
   3128     __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
   3129 
   3130     __ bind(&convert_exponent);
   3131     __ JumpIfNotSmi(exponent, &exponent_not_smi);
   3132     __ SmiUntag(exponent);
   3133 
   3134     // The base is in a double register and the exponent is
   3135     // an untagged smi. Allocate a heap number and call a
   3136     // C function for integer exponents. The register containing
   3137     // the heap number is callee-saved.
   3138     __ AllocateHeapNumber(heapnumber,
   3139                           scratch,
   3140                           scratch2,
   3141                           heapnumbermap,
   3142                           &call_runtime);
   3143     __ push(lr);
   3144     __ PrepareCallCFunction(3, scratch);
   3145     __ mov(r2, exponent);
   3146     __ vmov(r0, r1, double_base);
   3147     __ CallCFunction(
   3148         ExternalReference::power_double_int_function(masm->isolate()), 3);
   3149     __ pop(lr);
   3150     __ GetCFunctionDoubleResult(double_result);
   3151     __ vstr(double_result,
   3152             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
   3153     __ mov(r0, heapnumber);
   3154     __ Ret(2 * kPointerSize);
   3155 
   3156     __ bind(&exponent_not_smi);
   3157     __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
   3158     __ cmp(scratch, heapnumbermap);
   3159     __ b(ne, &call_runtime);
   3160     // Exponent is a heapnumber. Load it into double register.
   3161     __ vldr(double_exponent,
   3162             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   3163 
   3164     // The base and the exponent are in double registers.
   3165     // Allocate a heap number and call a C function for
   3166     // double exponents. The register containing
   3167     // the heap number is callee-saved.
   3168     __ AllocateHeapNumber(heapnumber,
   3169                           scratch,
   3170                           scratch2,
   3171                           heapnumbermap,
   3172                           &call_runtime);
   3173     __ push(lr);
   3174     __ PrepareCallCFunction(4, scratch);
   3175     __ vmov(r0, r1, double_base);
   3176     __ vmov(r2, r3, double_exponent);
   3177     __ CallCFunction(
   3178         ExternalReference::power_double_double_function(masm->isolate()), 4);
   3179     __ pop(lr);
   3180     __ GetCFunctionDoubleResult(double_result);
   3181     __ vstr(double_result,
   3182             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
   3183     __ mov(r0, heapnumber);
   3184     __ Ret(2 * kPointerSize);
   3185   }
   3186 
   3187   __ bind(&call_runtime);
   3188   __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
   3189 }
   3190 
   3191 
   3192 bool CEntryStub::NeedsImmovableCode() {
   3193   return true;
   3194 }
   3195 
   3196 
   3197 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   3198   __ Throw(r0);
   3199 }
   3200 
   3201 
   3202 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
   3203                                           UncatchableExceptionType type) {
   3204   __ ThrowUncatchable(type, r0);
   3205 }
   3206 
   3207 
   3208 void CEntryStub::GenerateCore(MacroAssembler* masm,
   3209                               Label* throw_normal_exception,
   3210                               Label* throw_termination_exception,
   3211                               Label* throw_out_of_memory_exception,
   3212                               bool do_gc,
   3213                               bool always_allocate) {
   3214   // r0: result parameter for PerformGC, if any
   3215   // r4: number of arguments including receiver  (C callee-saved)
   3216   // r5: pointer to builtin function  (C callee-saved)
   3217   // r6: pointer to the first argument (C callee-saved)
   3218   Isolate* isolate = masm->isolate();
   3219 
   3220   if (do_gc) {
   3221     // Passing r0.
   3222     __ PrepareCallCFunction(1, r1);
   3223     __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1);
   3224   }
   3225 
   3226   ExternalReference scope_depth =
   3227       ExternalReference::heap_always_allocate_scope_depth(isolate);
   3228   if (always_allocate) {
   3229     __ mov(r0, Operand(scope_depth));
   3230     __ ldr(r1, MemOperand(r0));
   3231     __ add(r1, r1, Operand(1));
   3232     __ str(r1, MemOperand(r0));
   3233   }
   3234 
   3235   // Call C built-in.
   3236   // r0 = argc, r1 = argv
   3237   __ mov(r0, Operand(r4));
   3238   __ mov(r1, Operand(r6));
   3239 
   3240 #if defined(V8_HOST_ARCH_ARM)
   3241   int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   3242   int frame_alignment_mask = frame_alignment - 1;
   3243   if (FLAG_debug_code) {
   3244     if (frame_alignment > kPointerSize) {
   3245       Label alignment_as_expected;
   3246       ASSERT(IsPowerOf2(frame_alignment));
   3247       __ tst(sp, Operand(frame_alignment_mask));
   3248       __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort and re-enter here.
   3250       __ stop("Unexpected alignment");
   3251       __ bind(&alignment_as_expected);
   3252     }
   3253   }
   3254 #endif
   3255 
   3256   __ mov(r2, Operand(ExternalReference::isolate_address()));
   3257 
   3258 
   3259   // TODO(1242173): To let the GC traverse the return address of the exit
   3260   // frames, we need to know where the return address is. Right now,
   3261   // we store it on the stack to be able to find it again, but we never
   3262   // restore from it in case of changes, which makes it impossible to
   3263   // support moving the C entry code stub. This should be fixed, but currently
   3264   // this is OK because the CEntryStub gets generated so early in the V8 boot
  // sequence that it never moves.
   3266 
  // Compute the return address in lr to return to after the jump below. Pc
  // is already at '+ 8' from the current instruction, but the return is
  // after three instructions, so add another 4 to pc to get the return
  // address.
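  // That is, lr = <address of the add below> + 12, the instruction right
  // after the Jump.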
   3270   masm->add(lr, pc, Operand(4));
   3271   __ str(lr, MemOperand(sp, 0));
   3272   masm->Jump(r5);
   3273 
   3274   if (always_allocate) {
   3275     // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
   3276     // though (contain the result).
   3277     __ mov(r2, Operand(scope_depth));
   3278     __ ldr(r3, MemOperand(r2));
   3279     __ sub(r3, r3, Operand(1));
   3280     __ str(r3, MemOperand(r2));
   3281   }
   3282 
  // Check for failure result.
   3284   Label failure_returned;
   3285   STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
   3286   // Lower 2 bits of r2 are 0 iff r0 has failure tag.
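  // Per the STATIC_ASSERT above, (kFailureTag + 1) has its low tag bits
  // clear, so adding 1 clears the low two bits exactly for failure values.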
   3287   __ add(r2, r0, Operand(1));
   3288   __ tst(r2, Operand(kFailureTagMask));
   3289   __ b(eq, &failure_returned);
   3290 
   3291   // Exit C frame and return.
   3292   // r0:r1: result
   3293   // sp: stack pointer
   3294   // fp: frame pointer
   3295   //  Callee-saved register r4 still holds argc.
   3296   __ LeaveExitFrame(save_doubles_, r4);
   3297   __ mov(pc, lr);
   3298 
  // Check if we should retry or throw an exception.
   3300   Label retry;
   3301   __ bind(&failure_returned);
   3302   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   3303   __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
   3304   __ b(eq, &retry);
   3305 
   3306   // Special handling of out of memory exceptions.
   3307   Failure* out_of_memory = Failure::OutOfMemoryException();
   3308   __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
   3309   __ b(eq, throw_out_of_memory_exception);
   3310 
   3311   // Retrieve the pending exception and clear the variable.
   3312   __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
   3313   __ ldr(r3, MemOperand(ip));
   3314   __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
   3315                                        isolate)));
   3316   __ ldr(r0, MemOperand(ip));
   3317   __ str(r3, MemOperand(ip));
   3318 
   3319   // Special handling of termination exceptions which are uncatchable
   3320   // by javascript code.
   3321   __ cmp(r0, Operand(isolate->factory()->termination_exception()));
   3322   __ b(eq, throw_termination_exception);
   3323 
   3324   // Handle normal exception.
   3325   __ jmp(throw_normal_exception);
   3326 
   3327   __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
   3328 }
   3329 
   3330 
   3331 void CEntryStub::Generate(MacroAssembler* masm) {
   3332   // Called from JavaScript; parameters are on stack as if calling JS function
   3333   // r0: number of arguments including receiver
   3334   // r1: pointer to builtin function
   3335   // fp: frame pointer  (restored after C call)
   3336   // sp: stack pointer  (restored as callee's sp after C call)
   3337   // cp: current context  (C callee-saved)
   3338 
   3339   // Result returned in r0 or r0+r1 by default.
   3340 
   3341   // NOTE: Invocations of builtins may return failure objects
   3342   // instead of a proper result. The builtin entry handles
   3343   // this by performing a garbage collection and retrying the
   3344   // builtin once.
   3345 
   3346   // Compute the argv pointer in a callee-saved register.
   3347   __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
   3348   __ sub(r6, r6, Operand(kPointerSize));
   3349 
   3350   // Enter the exit frame that transitions from JavaScript to C++.
   3351   __ EnterExitFrame(save_doubles_);
   3352 
   3353   // Setup argc and the builtin function in callee-saved registers.
   3354   __ mov(r4, Operand(r0));
   3355   __ mov(r5, Operand(r1));
   3356 
   3357   // r4: number of arguments (C callee-saved)
   3358   // r5: pointer to builtin function (C callee-saved)
   3359   // r6: pointer to first argument (C callee-saved)
   3360 
   3361   Label throw_normal_exception;
   3362   Label throw_termination_exception;
   3363   Label throw_out_of_memory_exception;
   3364 
   3365   // Call into the runtime system.
   3366   GenerateCore(masm,
   3367                &throw_normal_exception,
   3368                &throw_termination_exception,
   3369                &throw_out_of_memory_exception,
   3370                false,
   3371                false);
   3372 
   3373   // Do space-specific GC and retry runtime call.
   3374   GenerateCore(masm,
   3375                &throw_normal_exception,
   3376                &throw_termination_exception,
   3377                &throw_out_of_memory_exception,
   3378                true,
   3379                false);
   3380 
   3381   // Do full GC and retry runtime call one final time.
   3382   Failure* failure = Failure::InternalError();
   3383   __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
   3384   GenerateCore(masm,
   3385                &throw_normal_exception,
   3386                &throw_termination_exception,
   3387                &throw_out_of_memory_exception,
   3388                true,
   3389                true);
   3390 
   3391   __ bind(&throw_out_of_memory_exception);
   3392   GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
   3393 
   3394   __ bind(&throw_termination_exception);
   3395   GenerateThrowUncatchable(masm, TERMINATION);
   3396 
   3397   __ bind(&throw_normal_exception);
   3398   GenerateThrowTOS(masm);
   3399 }
   3400 
   3401 
   3402 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   3403   // r0: code entry
   3404   // r1: function
   3405   // r2: receiver
   3406   // r3: argc
   3407   // [sp+0]: argv
   3408 
   3409   Label invoke, exit;
   3410 
   3411   // Called from C, so do not pop argc and args on exit (preserve sp)
   3412   // No need to save register-passed args
   3413   // Save callee-saved registers (incl. cp and fp), sp, and lr
   3414   __ stm(db_w, sp, kCalleeSaved | lr.bit());
   3415 
   3416   if (CpuFeatures::IsSupported(VFP3)) {
   3417     CpuFeatures::Scope scope(VFP3);
   3418     // Save callee-saved vfp registers.
   3419     __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   3420   }
   3421 
   3422   // Get address of argv, see stm above.
   3423   // r0: code entry
   3424   // r1: function
   3425   // r2: receiver
   3426   // r3: argc
   3427 
   3428   // Setup argv in r4.
   3429   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
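  // The "+ 1" accounts for the lr slot saved together with the
  // callee-saved registers above.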
   3430   if (CpuFeatures::IsSupported(VFP3)) {
   3431     offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
   3432   }
   3433   __ ldr(r4, MemOperand(sp, offset_to_argv));
   3434 
   3435   // Push a frame with special values setup to mark it as an entry frame.
   3436   // r0: code entry
   3437   // r1: function
   3438   // r2: receiver
   3439   // r3: argc
   3440   // r4: argv
   3441   Isolate* isolate = masm->isolate();
   3442   __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   3443   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
   3444   __ mov(r7, Operand(Smi::FromInt(marker)));
   3445   __ mov(r6, Operand(Smi::FromInt(marker)));
   3446   __ mov(r5,
   3447          Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
   3448   __ ldr(r5, MemOperand(r5));
   3449   __ Push(r8, r7, r6, r5);
   3450 
   3451   // Setup frame pointer for the frame to be pushed.
   3452   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
   3453 
   3454 #ifdef ENABLE_LOGGING_AND_PROFILING
   3455   // If this is the outermost JS call, set js_entry_sp value.
   3456   Label non_outermost_js;
   3457   ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
   3458   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   3459   __ ldr(r6, MemOperand(r5));
   3460   __ cmp(r6, Operand(0));
   3461   __ b(ne, &non_outermost_js);
   3462   __ str(fp, MemOperand(r5));
   3463   __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   3464   Label cont;
   3465   __ b(&cont);
   3466   __ bind(&non_outermost_js);
   3467   __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   3468   __ bind(&cont);
   3469   __ push(ip);
   3470 #endif
   3471 
   3472   // Call a faked try-block that does the invoke.
   3473   __ bl(&invoke);
   3474 
   3475   // Caught exception: Store result (exception) in the pending
   3476   // exception field in the JSEnv and return a failure sentinel.
   3477   // Coming in here the fp will be invalid because the PushTryHandler below
   3478   // sets it to 0 to signal the existence of the JSEntry frame.
   3479   __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
   3480                                        isolate)));
   3481   __ str(r0, MemOperand(ip));
   3482   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   3483   __ b(&exit);
   3484 
   3485   // Invoke: Link this frame into the handler chain.
   3486   __ bind(&invoke);
   3487   // Must preserve r0-r4, r5-r7 are available.
   3488   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
   3489   // If an exception not caught by another handler occurs, this handler
   3490   // returns control to the code after the bl(&invoke) above, which
   3491   // restores all kCalleeSaved registers (including cp and fp) to their
   3492   // saved values before returning a failure to C.
   3493 
   3494   // Clear any pending exceptions.
   3495   __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
   3496   __ ldr(r5, MemOperand(ip));
   3497   __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
   3498                                        isolate)));
   3499   __ str(r5, MemOperand(ip));
   3500 
   3501   // Invoke the function by calling through JS entry trampoline builtin.
   3502   // Notice that we cannot store a reference to the trampoline code directly in
   3503   // this stub, because runtime stubs are not traversed when doing GC.
   3504 
   3505   // Expected registers by Builtins::JSEntryTrampoline
   3506   // r0: code entry
   3507   // r1: function
   3508   // r2: receiver
   3509   // r3: argc
   3510   // r4: argv
   3511   if (is_construct) {
   3512     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
   3513                                       isolate);
   3514     __ mov(ip, Operand(construct_entry));
   3515   } else {
   3516     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
   3517     __ mov(ip, Operand(entry));
   3518   }
   3519   __ ldr(ip, MemOperand(ip));  // deref address
   3520 
   3521   // Branch and link to JSEntryTrampoline.  We don't use the double underscore
   3522   // macro for the add instruction because we don't want the coverage tool
   3523   // inserting instructions here after we read the pc.
   3524   __ mov(lr, Operand(pc));
   3525   masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   3526 
   3527   // Unlink this frame from the handler chain.
   3528   __ PopTryHandler();
   3529 
   3530   __ bind(&exit);  // r0 holds result
   3531 #ifdef ENABLE_LOGGING_AND_PROFILING
   3532   // Check if the current stack frame is marked as the outermost JS frame.
   3533   Label non_outermost_js_2;
   3534   __ pop(r5);
   3535   __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   3536   __ b(ne, &non_outermost_js_2);
   3537   __ mov(r6, Operand(0));
   3538   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   3539   __ str(r6, MemOperand(r5));
   3540   __ bind(&non_outermost_js_2);
   3541 #endif
   3542 
   3543   // Restore the top frame descriptors from the stack.
   3544   __ pop(r3);
   3545   __ mov(ip,
   3546          Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
   3547   __ str(r3, MemOperand(ip));
   3548 
   3549   // Reset the stack to the callee saved registers.
   3550   __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
   3551 
   3552   // Restore callee-saved registers and return.
   3553 #ifdef DEBUG
   3554   if (FLAG_debug_code) {
   3555     __ mov(lr, Operand(pc));
   3556   }
   3557 #endif
   3558 
   3559   if (CpuFeatures::IsSupported(VFP3)) {
   3560     CpuFeatures::Scope scope(VFP3);
   3561     // Restore callee-saved vfp registers.
   3562     __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   3563   }
   3564 
   3565   __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
   3566 }
   3567 
   3568 
   3569 // Uses registers r0 to r4.
   3570 // Expected input (depending on whether args are in registers or on the stack):
   3571 // * object: r0 or at sp + 1 * kPointerSize.
   3572 // * function: r1 or at sp.
   3573 //
   3574 // An inlined call site may have been generated before calling this stub.
   3575 // In this case the offset to the inline site to patch is passed on the stack,
   3576 // in the safepoint slot for register r4.
   3577 // (See LCodeGen::DoInstanceOfKnownGlobal)
   3578 void InstanceofStub::Generate(MacroAssembler* masm) {
   3579   // Call site inlining and patching implies arguments in registers.
   3580   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   3581   // ReturnTrueFalse is only implemented for inlined call sites.
   3582   ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
   3583 
   3584   // Fixed register usage throughout the stub:
   3585   const Register object = r0;  // Object (lhs).
   3586   Register map = r3;  // Map of the object.
   3587   const Register function = r1;  // Function (rhs).
   3588   const Register prototype = r4;  // Prototype of the function.
   3589   const Register inline_site = r9;
   3590   const Register scratch = r2;
   3591 
   3592   const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
   3593 
   3594   Label slow, loop, is_instance, is_not_instance, not_js_object;
   3595 
   3596   if (!HasArgsInRegisters()) {
   3597     __ ldr(object, MemOperand(sp, 1 * kPointerSize));
   3598     __ ldr(function, MemOperand(sp, 0));
   3599   }
   3600 
  // Check that the left-hand side is a JS object and load its map.
   3602   __ JumpIfSmi(object, &not_js_object);
   3603   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
   3604 
  // If there is a call site cache, don't look in the global cache, but do
  // the real lookup and update the call site cache.
   3607   if (!HasCallSiteInlineCheck()) {
   3608     Label miss;
   3609     __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
   3610     __ cmp(function, ip);
   3611     __ b(ne, &miss);
   3612     __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
   3613     __ cmp(map, ip);
   3614     __ b(ne, &miss);
   3615     __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
   3616     __ Ret(HasArgsInRegisters() ? 0 : 2);
   3617 
   3618     __ bind(&miss);
   3619   }
   3620 
   3621   // Get the prototype of the function.
   3622   __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
   3623 
   3624   // Check that the function prototype is a JS object.
   3625   __ JumpIfSmi(prototype, &slow);
   3626   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
   3627 
   3628   // Update the global instanceof or call site inlined cache with the current
   3629   // map and function. The cached answer will be set when it is known below.
   3630   if (!HasCallSiteInlineCheck()) {
   3631     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
   3632     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   3633   } else {
   3634     ASSERT(HasArgsInRegisters());
   3635     // Patch the (relocated) inlined map check.
   3636 
    // The offset was stored in the r4 safepoint slot.
   3638     // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
   3639     __ LoadFromSafepointRegisterSlot(scratch, r4);
   3640     __ sub(inline_site, lr, scratch);
   3641     // Get the map location in scratch and patch it.
   3642     __ GetRelocatedValueLocation(inline_site, scratch);
   3643     __ str(map, MemOperand(scratch));
   3644   }
   3645 
   3646   // Register mapping: r3 is object map and r4 is function prototype.
   3647   // Get prototype of object into r2.
   3648   __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
   3649 
   3650   // We don't need map any more. Use it as a scratch register.
   3651   Register scratch2 = map;
   3652   map = no_reg;
   3653 
   3654   // Loop through the prototype chain looking for the function prototype.
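  // scratch walks the chain; scratch2 holds null, which terminates it.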
   3655   __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
   3656   __ bind(&loop);
   3657   __ cmp(scratch, Operand(prototype));
   3658   __ b(eq, &is_instance);
   3659   __ cmp(scratch, scratch2);
   3660   __ b(eq, &is_not_instance);
   3661   __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
   3662   __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
   3663   __ jmp(&loop);
   3664 
   3665   __ bind(&is_instance);
   3666   if (!HasCallSiteInlineCheck()) {
   3667     __ mov(r0, Operand(Smi::FromInt(0)));
   3668     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
   3669   } else {
   3670     // Patch the call site to return true.
   3671     __ LoadRoot(r0, Heap::kTrueValueRootIndex);
   3672     __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
   3673     // Get the boolean result location in scratch and patch it.
   3674     __ GetRelocatedValueLocation(inline_site, scratch);
   3675     __ str(r0, MemOperand(scratch));
   3676 
   3677     if (!ReturnTrueFalseObject()) {
   3678       __ mov(r0, Operand(Smi::FromInt(0)));
   3679     }
   3680   }
   3681   __ Ret(HasArgsInRegisters() ? 0 : 2);
   3682 
   3683   __ bind(&is_not_instance);
   3684   if (!HasCallSiteInlineCheck()) {
   3685     __ mov(r0, Operand(Smi::FromInt(1)));
   3686     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
   3687   } else {
   3688     // Patch the call site to return false.
   3689     __ LoadRoot(r0, Heap::kFalseValueRootIndex);
   3690     __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
   3691     // Get the boolean result location in scratch and patch it.
   3692     __ GetRelocatedValueLocation(inline_site, scratch);
   3693     __ str(r0, MemOperand(scratch));
   3694 
   3695     if (!ReturnTrueFalseObject()) {
   3696       __ mov(r0, Operand(Smi::FromInt(1)));
   3697     }
   3698   }
   3699   __ Ret(HasArgsInRegisters() ? 0 : 2);
   3700 
   3701   Label object_not_null, object_not_null_or_smi;
   3702   __ bind(&not_js_object);
  // Before null, smi and string value checks, check that the rhs is a
  // function, since for a non-function rhs an exception needs to be thrown.
   3705   __ JumpIfSmi(function, &slow);
   3706   __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
   3707   __ b(ne, &slow);
   3708 
   3709   // Null is not instance of anything.
   3710   __ cmp(scratch, Operand(FACTORY->null_value()));
   3711   __ b(ne, &object_not_null);
   3712   __ mov(r0, Operand(Smi::FromInt(1)));
   3713   __ Ret(HasArgsInRegisters() ? 0 : 2);
   3714 
   3715   __ bind(&object_not_null);
   3716   // Smi values are not instances of anything.
   3717   __ JumpIfNotSmi(object, &object_not_null_or_smi);
   3718   __ mov(r0, Operand(Smi::FromInt(1)));
   3719   __ Ret(HasArgsInRegisters() ? 0 : 2);
   3720 
   3721   __ bind(&object_not_null_or_smi);
   3722   // String values are not instances of anything.
   3723   __ IsObjectJSStringType(object, scratch, &slow);
   3724   __ mov(r0, Operand(Smi::FromInt(1)));
   3725   __ Ret(HasArgsInRegisters() ? 0 : 2);
   3726 
   3727   // Slow-case.  Tail call builtin.
   3728   __ bind(&slow);
   3729   if (!ReturnTrueFalseObject()) {
   3730     if (HasArgsInRegisters()) {
   3731       __ Push(r0, r1);
   3732     }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
   3734   } else {
   3735     __ EnterInternalFrame();
   3736     __ Push(r0, r1);
   3737     __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
   3738     __ LeaveInternalFrame();
   3739     __ cmp(r0, Operand(0));
   3740     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
   3741     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
   3742     __ Ret(HasArgsInRegisters() ? 0 : 2);
   3743   }
   3744 }
   3745 
   3746 
   3747 Register InstanceofStub::left() { return r0; }
   3748 
   3749 
   3750 Register InstanceofStub::right() { return r1; }
   3751 
   3752 
   3753 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   3754   // The displacement is the offset of the last parameter (if any)
   3755   // relative to the frame pointer.
   3756   static const int kDisplacement =
   3757       StandardFrameConstants::kCallerSPOffset - kPointerSize;
   3758 
   3759   // Check that the key is a smi.
   3760   Label slow;
   3761   __ JumpIfNotSmi(r1, &slow);
   3762 
   3763   // Check if the calling frame is an arguments adaptor frame.
   3764   Label adaptor;
   3765   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3766   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
   3767   __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3768   __ b(eq, &adaptor);
   3769 
   3770   // Check index against formal parameters count limit passed in
   3771   // through register r0. Use unsigned comparison to get negative
   3772   // check for free.
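  // A negative smi key, viewed as an unsigned value, is larger than any
  // valid argument count, so a single unsigned branch catches both cases.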
   3773   __ cmp(r1, r0);
   3774   __ b(hs, &slow);
   3775 
   3776   // Read the argument from the stack and return it.
   3777   __ sub(r3, r0, r1);
   3778   __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
   3779   __ ldr(r0, MemOperand(r3, kDisplacement));
   3780   __ Jump(lr);
   3781 
   3782   // Arguments adaptor case: Check index against actual arguments
   3783   // limit found in the arguments adaptor frame. Use unsigned
   3784   // comparison to get negative check for free.
   3785   __ bind(&adaptor);
   3786   __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3787   __ cmp(r1, r0);
   3788   __ b(cs, &slow);
   3789 
   3790   // Read the argument from the adaptor frame and return it.
   3791   __ sub(r3, r0, r1);
   3792   __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
   3793   __ ldr(r0, MemOperand(r3, kDisplacement));
   3794   __ Jump(lr);
   3795 
   3796   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   3797   // by calling the runtime system.
   3798   __ bind(&slow);
   3799   __ push(r1);
   3800   __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
   3801 }
   3802 
   3803 
   3804 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   3805   // sp[0] : number of parameters
   3806   // sp[4] : receiver displacement
   3807   // sp[8] : function
   3808 
   3809   // Check if the calling frame is an arguments adaptor frame.
   3810   Label adaptor_frame, try_allocate, runtime;
   3811   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3812   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
   3813   __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3814   __ b(eq, &adaptor_frame);
   3815 
   3816   // Get the length from the frame.
   3817   __ ldr(r1, MemOperand(sp, 0));
   3818   __ b(&try_allocate);
   3819 
   3820   // Patch the arguments.length and the parameters pointer.
   3821   __ bind(&adaptor_frame);
   3822   __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3823   __ str(r1, MemOperand(sp, 0));
   3824   __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
   3825   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
   3826   __ str(r3, MemOperand(sp, 1 * kPointerSize));
   3827 
   3828   // Try the new space allocation. Start out with computing the size
   3829   // of the arguments object and the elements array in words.
   3830   Label add_arguments_object;
   3831   __ bind(&try_allocate);
   3832   __ cmp(r1, Operand(0, RelocInfo::NONE));
   3833   __ b(eq, &add_arguments_object);
   3834   __ mov(r1, Operand(r1, LSR, kSmiTagSize));
   3835   __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
   3836   __ bind(&add_arguments_object);
   3837   __ add(r1, r1, Operand(GetArgumentsObjectSize() / kPointerSize));
   3838 
   3839   // Do the allocation of both objects in one go.
   3840   __ AllocateInNewSpace(
   3841       r1,
   3842       r0,
   3843       r2,
   3844       r3,
   3845       &runtime,
   3846       static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
   3847 
   3848   // Get the arguments boilerplate from the current (global) context.
   3849   __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   3850   __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
   3851   __ ldr(r4, MemOperand(r4,
   3852                         Context::SlotOffset(GetArgumentsBoilerplateIndex())));
   3853 
   3854   // Copy the JS object part.
   3855   __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
   3856 
   3857   if (type_ == NEW_NON_STRICT) {
   3858     // Set up the callee in-object property.
   3859     STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   3860     __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
   3861     const int kCalleeOffset = JSObject::kHeaderSize +
   3862                               Heap::kArgumentsCalleeIndex * kPointerSize;
   3863     __ str(r3, FieldMemOperand(r0, kCalleeOffset));
   3864   }
   3865 
   3866   // Get the length (smi tagged) and set that as an in-object property too.
   3867   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   3868   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   3869   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
   3870                                  Heap::kArgumentsLengthIndex * kPointerSize));
   3871 
   3872   // If there are no actual arguments, we're done.
   3873   Label done;
   3874   __ cmp(r1, Operand(0, RelocInfo::NONE));
   3875   __ b(eq, &done);
   3876 
   3877   // Get the parameters pointer from the stack.
   3878   __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
   3879 
   3880   // Set up the elements pointer in the allocated arguments object and
   3881   // initialize the header in the elements fixed array.
   3882   __ add(r4, r0, Operand(GetArgumentsObjectSize()));
   3883   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
   3884   __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
   3885   __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
   3886   __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
   3887   __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop.
   3888 
   3889   // Copy the fixed array slots.
   3890   Label loop;
   3891   // Set up r4 to point to the first array slot.
   3892   __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   3893   __ bind(&loop);
   3894   // Pre-decrement r2 with kPointerSize on each iteration.
   3895   // Pre-decrement in order to skip receiver.
   3896   __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
   3897   // Post-increment r4 with kPointerSize on each iteration.
   3898   __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
   3899   __ sub(r1, r1, Operand(1));
   3900   __ cmp(r1, Operand(0, RelocInfo::NONE));
   3901   __ b(ne, &loop);
   3902 
   3903   // Return and remove the on-stack parameters.
   3904   __ bind(&done);
   3905   __ add(sp, sp, Operand(3 * kPointerSize));
   3906   __ Ret();
   3907 
   3908   // Do the runtime call to allocate the arguments object.
   3909   __ bind(&runtime);
   3910   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
   3911 }
   3912 
   3913 
   3914 void RegExpExecStub::Generate(MacroAssembler* masm) {
   3915   // Jump straight to the runtime system if native RegExp is not selected
   3916   // at compile time, or if the regexp entry in generated code has been
   3917   // turned off by the runtime flag.
   3918 #ifdef V8_INTERPRETED_REGEXP
   3919   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
   3920 #else  // V8_INTERPRETED_REGEXP
   3921   if (!FLAG_regexp_entry_native) {
   3922     __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
   3923     return;
   3924   }
   3925 
   3926   // Stack frame on entry.
   3927   //  sp[0]: last_match_info (expected JSArray)
   3928   //  sp[4]: previous index
   3929   //  sp[8]: subject string
   3930   //  sp[12]: JSRegExp object
   3931 
   3932   static const int kLastMatchInfoOffset = 0 * kPointerSize;
   3933   static const int kPreviousIndexOffset = 1 * kPointerSize;
   3934   static const int kSubjectOffset = 2 * kPointerSize;
   3935   static const int kJSRegExpOffset = 3 * kPointerSize;
   3936 
   3937   Label runtime, invoke_regexp;
   3938 
   3939   // Allocation of registers for this function. These are in callee save
   3940   // registers and will be preserved by the call to the native RegExp code, as
   3941   // this code is called using the normal C calling convention. When calling
   3942   // directly from generated code the native RegExp code will not do a GC, and
   3943   // therefore the contents of these registers are safe to use after the call.
   3944   Register subject = r4;
   3945   Register regexp_data = r5;
   3946   Register last_match_info_elements = r6;
   3947 
   3948   // Ensure that a RegExp stack is allocated.
   3949   Isolate* isolate = masm->isolate();
   3950   ExternalReference address_of_regexp_stack_memory_address =
   3951       ExternalReference::address_of_regexp_stack_memory_address(isolate);
   3952   ExternalReference address_of_regexp_stack_memory_size =
   3953       ExternalReference::address_of_regexp_stack_memory_size(isolate);
   3954   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
   3955   __ ldr(r0, MemOperand(r0, 0));
   3956   __ tst(r0, Operand(r0));
   3957   __ b(eq, &runtime);
   3958 
   3959   // Check that the first argument is a JSRegExp object.
   3960   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
   3961   STATIC_ASSERT(kSmiTag == 0);
   3962   __ tst(r0, Operand(kSmiTagMask));
   3963   __ b(eq, &runtime);
   3964   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   3965   __ b(ne, &runtime);
   3966 
   3967   // Check that the RegExp has been compiled (data contains a fixed array).
   3968   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
   3969   if (FLAG_debug_code) {
   3970     __ tst(regexp_data, Operand(kSmiTagMask));
   3971     __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
   3972     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
   3973     __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
   3974   }
   3975 
   3976   // regexp_data: RegExp data (FixedArray)
   3977   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   3978   __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   3979   __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
   3980   __ b(ne, &runtime);
   3981 
   3982   // regexp_data: RegExp data (FixedArray)
   3983   // Check that the number of captures fits in the static offsets vector buffer.
   3984   __ ldr(r2,
   3985          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   3986   // Calculate number of capture registers (number_of_captures + 1) * 2. This
   3987   // uses the assumption that smis are 2 * their untagged value.
   3988   STATIC_ASSERT(kSmiTag == 0);
   3989   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   3990   __ add(r2, r2, Operand(2));  // r2 was a smi.
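          // Worked example: a regexp with 2 capture groups stores capture count 2
          // as smi 4; adding 2 yields 6 = (2 + 1) * 2 capture registers.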
   3991   // Check that the static offsets vector buffer is large enough.
   3992   __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
   3993   __ b(hi, &runtime);
   3994 
   3995   // r2: Number of capture registers
   3996   // regexp_data: RegExp data (FixedArray)
   3997   // Check that the second argument is a string.
   3998   __ ldr(subject, MemOperand(sp, kSubjectOffset));
   3999   __ tst(subject, Operand(kSmiTagMask));
   4000   __ b(eq, &runtime);
   4001   Condition is_string = masm->IsObjectStringType(subject, r0);
   4002   __ b(NegateCondition(is_string), &runtime);
   4003   // Get the length of the string to r3.
   4004   __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
   4005 
   4006   // r2: Number of capture registers
   4007   // r3: Length of subject string as a smi
   4008   // subject: Subject string
   4009   // regexp_data: RegExp data (FixedArray)
   4010   // Check that the third argument is a non-negative smi less than the subject
   4011   // string length. A negative value will appear greater (unsigned comparison).
   4012   __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
   4013   __ tst(r0, Operand(kSmiTagMask));
   4014   __ b(ne, &runtime);
   4015   __ cmp(r3, Operand(r0));
   4016   __ b(ls, &runtime);
   4017 
   4018   // r2: Number of capture registers
   4019   // subject: Subject string
   4020   // regexp_data: RegExp data (FixedArray)
   4021   // Check that the fourth argument is a JSArray object.
   4022   __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
   4023   __ tst(r0, Operand(kSmiTagMask));
   4024   __ b(eq, &runtime);
   4025   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
   4026   __ b(ne, &runtime);
   4027   // Check that the JSArray is in the fast case.
   4028   __ ldr(last_match_info_elements,
   4029          FieldMemOperand(r0, JSArray::kElementsOffset));
   4030   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   4031   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   4032   __ cmp(r0, ip);
   4033   __ b(ne, &runtime);
   4034   // Check that the last match info has space for the capture registers and the
   4035   // additional information.
   4036   __ ldr(r0,
   4037          FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   4038   __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
   4039   __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
   4040   __ b(gt, &runtime);
   4041 
   4042   // subject: Subject string
   4043   // regexp_data: RegExp data (FixedArray)
   4044   // Check the representation and encoding of the subject string.
   4045   Label seq_string;
   4046   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   4047   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   4048   // First check for flat string.
   4049   __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
   4050   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   4051   __ b(eq, &seq_string);
   4052 
   4053   // subject: Subject string
   4054   // regexp_data: RegExp data (FixedArray)
   4055   // Check for flat cons string.
   4056   // A flat cons string is a cons string where the second part is the empty
   4057   // string. In that case the subject string is just the first part of the cons
   4058   // string. Also in this case the first part of the cons string is known to be
   4059   // a sequential string or an external string.
   4060   STATIC_ASSERT(kExternalStringTag != 0);
   4061   STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
   4062   __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
   4063   __ b(ne, &runtime);
   4064   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
   4065   __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
   4066   __ cmp(r0, r1);
   4067   __ b(ne, &runtime);
   4068   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   4069   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   4070   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   4071   // Is first part a flat string?
   4072   STATIC_ASSERT(kSeqStringTag == 0);
   4073   __ tst(r0, Operand(kStringRepresentationMask));
   4074   __ b(ne, &runtime);
   4075 
   4076   __ bind(&seq_string);
   4077   // subject: Subject string
   4078   // regexp_data: RegExp data (FixedArray)
   4079   // r0: Instance type of subject string
   4080   STATIC_ASSERT(4 == kAsciiStringTag);
   4081   STATIC_ASSERT(kTwoByteStringTag == 0);
   4082   // Find the code object based on the assumptions above.
   4083   __ and_(r0, r0, Operand(kStringEncodingMask));
   4084   __ mov(r3, Operand(r0, ASR, 2), SetCC);
   4085   __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
   4086   __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
   4087 
   4088   // Check that the irregexp code has been generated for the actual string
   4089   // encoding. If it has, the field contains a code object; otherwise it contains
   4090   // the hole.
   4091   __ CompareObjectType(r7, r0, r0, CODE_TYPE);
   4092   __ b(ne, &runtime);
   4093 
   4094   // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
   4095   // r7: code
   4096   // subject: Subject string
   4097   // regexp_data: RegExp data (FixedArray)
   4098   // Load used arguments before starting to push arguments for call to native
   4099   // RegExp code to avoid handling changing stack height.
   4100   __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
   4101   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   4102 
   4103   // r1: previous index
   4104   // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
   4105   // r7: code
   4106   // subject: Subject string
   4107   // regexp_data: RegExp data (FixedArray)
   4108   // All checks done. Now push arguments for native regexp code.
   4109   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
   4110 
   4111   // Isolates: note we add an additional parameter here (isolate pointer).
   4112   static const int kRegExpExecuteArguments = 8;
   4113   static const int kParameterRegisters = 4;
   4114   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
   4115 
   4116   // Stack pointer now points to cell where return address is to be written.
   4117   // Arguments are before that on the stack or in registers.
   4118 
   4119   // Argument 8 (sp[16]): Pass current isolate address.
   4120   __ mov(r0, Operand(ExternalReference::isolate_address()));
   4121   __ str(r0, MemOperand(sp, 4 * kPointerSize));
   4122 
   4123   // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
   4124   __ mov(r0, Operand(1));
   4125   __ str(r0, MemOperand(sp, 3 * kPointerSize));
   4126 
   4127   // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
   4128   __ mov(r0, Operand(address_of_regexp_stack_memory_address));
   4129   __ ldr(r0, MemOperand(r0, 0));
   4130   __ mov(r2, Operand(address_of_regexp_stack_memory_size));
   4131   __ ldr(r2, MemOperand(r2, 0));
   4132   __ add(r0, r0, Operand(r2));
   4133   __ str(r0, MemOperand(sp, 2 * kPointerSize));
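          // Note: base address + size is the high end of the area; the
          // backtracking stack grows downwards from there.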
   4134 
   4135   // Argument 5 (sp[4]): static offsets vector buffer.
   4136   __ mov(r0,
   4137          Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
   4138   __ str(r0, MemOperand(sp, 1 * kPointerSize));
   4139 
   4140   // For arguments 4 and 3, get the string length, calculate the start of the
   4141   // string data, and compute the shift of the index (0 for ASCII, 1 for two byte).
   4142   __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
   4143   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
   4144   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   4145   __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   4146   __ eor(r3, r3, Operand(1));
   4147   // Argument 4 (r3): End of string data
   4148   // Argument 3 (r2): Start of string data
   4149   __ add(r2, r9, Operand(r1, LSL, r3));
   4150   __ add(r3, r9, Operand(r0, LSL, r3));
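          // After the eor, r3 holds the per-character shift: 0 for ASCII (one
          // byte per character) and 1 for two-byte strings, so e.g. index 3 in
          // a two-byte string lies 3 << 1 = 6 bytes past the string data.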
   4151 
   4152   // Argument 2 (r1): Previous index.
   4153   // Already there
   4154 
   4155   // Argument 1 (r0): Subject string.
   4156   __ mov(r0, subject);
   4157 
   4158   // Locate the code entry and call it.
   4159   __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
   4160   DirectCEntryStub stub;
   4161   stub.GenerateCall(masm, r7);
   4162 
   4163   __ LeaveExitFrame(false, no_reg);
   4164 
   4165   // r0: result
   4166   // subject: subject string (callee saved)
   4167   // regexp_data: RegExp data (callee saved)
   4168   // last_match_info_elements: Last match info elements (callee saved)
   4169 
   4170   // Check the result.
   4171   Label success;
   4172 
   4173   __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
   4174   __ b(eq, &success);
   4175   Label failure;
   4176   __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
   4177   __ b(eq, &failure);
   4178   __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   4179   // If not an exception, it can only be retry. Handle that in the runtime system.
   4180   __ b(ne, &runtime);
   4181   // The result must now be an exception. If there is no pending exception,
   4182   // a stack overflow (on the backtrack stack) was detected in the RegExp code
   4183   // but the exception has not been created yet. Handle it in the runtime system.
   4184   // TODO(592): Rerun the RegExp to get the stack overflow exception.
   4185   __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
   4186   __ ldr(r1, MemOperand(r1, 0));
   4187   __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
   4188                                        isolate)));
   4189   __ ldr(r0, MemOperand(r2, 0));
   4190   __ cmp(r0, r1);
   4191   __ b(eq, &runtime);
   4192 
   4193   __ str(r1, MemOperand(r2, 0));  // Clear pending exception.
   4194 
   4195   // Check if the exception is a termination. If so, throw as uncatchable.
   4196   __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
   4197   __ cmp(r0, ip);
   4198   Label termination_exception;
   4199   __ b(eq, &termination_exception);
   4200 
   4201   __ Throw(r0);  // Expects thrown value in r0.
   4202 
   4203   __ bind(&termination_exception);
   4204   __ ThrowUncatchable(TERMINATION, r0);  // Expects thrown value in r0.
   4205 
   4206   __ bind(&failure);
   4207   // For failure and exception return null.
   4208   __ mov(r0, Operand(FACTORY->null_value()));
   4209   __ add(sp, sp, Operand(4 * kPointerSize));
   4210   __ Ret();
   4211 
   4212   // Process the result from the native regexp code.
   4213   __ bind(&success);
   4214   __ ldr(r1,
   4215          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   4216   // Calculate number of capture registers (number_of_captures + 1) * 2.
   4217   STATIC_ASSERT(kSmiTag == 0);
   4218   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   4219   __ add(r1, r1, Operand(2));  // r1 was a smi.
   4220 
   4221   // r1: number of capture registers
   4222   // r4: subject string
   4223   // Store the capture count.
   4224   __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
   4225   __ str(r2, FieldMemOperand(last_match_info_elements,
   4226                              RegExpImpl::kLastCaptureCountOffset));
   4227   // Store last subject and last input.
   4228   __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
   4229   __ str(subject,
   4230          FieldMemOperand(last_match_info_elements,
   4231                          RegExpImpl::kLastSubjectOffset));
   4232   __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
   4233   __ str(subject,
   4234          FieldMemOperand(last_match_info_elements,
   4235                          RegExpImpl::kLastInputOffset));
   4236   __ mov(r3, last_match_info_elements);
   4237   __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
   4238 
   4239   // Get the static offsets vector filled by the native regexp code.
   4240   ExternalReference address_of_static_offsets_vector =
   4241       ExternalReference::address_of_static_offsets_vector(isolate);
   4242   __ mov(r2, Operand(address_of_static_offsets_vector));
   4243 
   4244   // r1: number of capture registers
   4245   // r2: offsets vector
   4246   Label next_capture, done;
   4247   // Capture register counter starts from the number of capture registers
   4248   // and counts down until wrapping after zero.
   4249   __ add(r0,
   4250          last_match_info_elements,
   4251          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
   4252   __ bind(&next_capture);
   4253   __ sub(r1, r1, Operand(1), SetCC);
   4254   __ b(mi, &done);
   4255   // Read the value from the static offsets vector buffer.
   4256   __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
   4257   // Store the smi value in the last match info.
   4258   __ mov(r3, Operand(r3, LSL, kSmiTagSize));
   4259   __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
   4260   __ jmp(&next_capture);
   4261   __ bind(&done);
   4262 
   4263   // Return last match info.
   4264   __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
   4265   __ add(sp, sp, Operand(4 * kPointerSize));
   4266   __ Ret();
   4267 
   4268   // Do the runtime call to execute the regexp.
   4269   __ bind(&runtime);
   4270   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
   4271 #endif  // V8_INTERPRETED_REGEXP
   4272 }
   4273 
   4274 
   4275 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   4276   const int kMaxInlineLength = 100;
   4277   Label slowcase;
   4278   Label done;
   4279   __ ldr(r1, MemOperand(sp, kPointerSize * 2));
   4280   STATIC_ASSERT(kSmiTag == 0);
   4281   STATIC_ASSERT(kSmiTagSize == 1);
   4282   __ tst(r1, Operand(kSmiTagMask));
   4283   __ b(ne, &slowcase);
   4284   __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
   4285   __ b(hi, &slowcase);
   4286   // Smi-tagging is equivalent to multiplying by 2.
   4287   // Allocate RegExpResult followed by FixedArray with size in r2.
   4288   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
   4289   // Elements:  [Map][Length][..elements..]
   4290   // Size of JSArray with two in-object properties and the header of a
   4291   // FixedArray.
   4292   int objects_size =
   4293       (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
   4294   __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
   4295   __ add(r2, r5, Operand(objects_size));
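          // E.g. an array length of 10 arrives as smi 20; r5 = 20 >> 1 = 10
          // elements, and r2 = 10 + objects_size words to allocate.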
   4296   __ AllocateInNewSpace(
   4297       r2,  // In: Size, in words.
   4298       r0,  // Out: Start of allocation (tagged).
   4299       r3,  // Scratch register.
   4300       r4,  // Scratch register.
   4301       &slowcase,
   4302       static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
   4303   // r0: Start of allocated area, object-tagged.
   4304   // r1: Number of elements in array, as smi.
   4305   // r5: Number of elements, untagged.
   4306 
   4307   // Set JSArray map to global.regexp_result_map().
   4308   // Set empty properties FixedArray.
   4309   // Set elements to point to FixedArray allocated right after the JSArray.
   4310   // Interleave operations for better latency.
   4311   __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   4312   __ add(r3, r0, Operand(JSRegExpResult::kSize));
   4313   __ mov(r4, Operand(FACTORY->empty_fixed_array()));
   4314   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
   4315   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   4316   __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
   4317   __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   4318   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
   4319 
   4320   // Set input, index and length fields from arguments.
   4321   __ ldr(r1, MemOperand(sp, kPointerSize * 0));
   4322   __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
   4323   __ ldr(r1, MemOperand(sp, kPointerSize * 1));
   4324   __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
   4325   __ ldr(r1, MemOperand(sp, kPointerSize * 2));
   4326   __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
   4327 
   4328   // Fill out the elements FixedArray.
   4329   // r0: JSArray, tagged.
   4330   // r3: FixedArray, tagged.
   4331   // r5: Number of elements in array, untagged.
   4332 
   4333   // Set map.
   4334   __ mov(r2, Operand(FACTORY->fixed_array_map()));
   4335   __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   4336   // Set FixedArray length.
   4337   __ mov(r6, Operand(r5, LSL, kSmiTagSize));
   4338   __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
   4339   // Fill contents of fixed-array with the-hole.
   4340   __ mov(r2, Operand(FACTORY->the_hole_value()));
   4341   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   4342   // Fill fixed array elements with hole.
   4343   // r0: JSArray, tagged.
   4344   // r2: the hole.
   4345   // r3: Start of elements in FixedArray.
   4346   // r5: Number of elements to fill.
   4347   Label loop;
   4348   __ tst(r5, Operand(r5));
   4349   __ bind(&loop);
   4350   __ b(le, &done);  // Jump if r5 is negative or zero.
   4351   __ sub(r5, r5, Operand(1), SetCC);
   4352   __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
   4353   __ jmp(&loop);
   4354 
   4355   __ bind(&done);
   4356   __ add(sp, sp, Operand(3 * kPointerSize));
   4357   __ Ret();
   4358 
   4359   __ bind(&slowcase);
   4360   __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
   4361 }
   4362 
   4363 
   4364 void CallFunctionStub::Generate(MacroAssembler* masm) {
   4365   Label slow;
   4366 
   4367   // If the receiver might be a value (string, number or boolean) check for this
   4368   // and box it if it is.
   4369   if (ReceiverMightBeValue()) {
   4370     // Get the receiver from the stack.
   4371     // function, receiver [, arguments]
   4372     Label receiver_is_value, receiver_is_js_object;
   4373     __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
   4374 
   4375     // Check if receiver is a smi (which is a number value).
   4376     __ JumpIfSmi(r1, &receiver_is_value);
   4377 
   4378     // Check if the receiver is a valid JS object.
   4379     __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
   4380     __ b(ge, &receiver_is_js_object);
   4381 
   4382     // Call the runtime to box the value.
   4383     __ bind(&receiver_is_value);
   4384     __ EnterInternalFrame();
   4385     __ push(r1);
   4386     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
   4387     __ LeaveInternalFrame();
   4388     __ str(r0, MemOperand(sp, argc_ * kPointerSize));
   4389 
   4390     __ bind(&receiver_is_js_object);
   4391   }
   4392 
   4393   // Get the function to call from the stack.
   4394   // function, receiver [, arguments]
   4395   __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
   4396 
   4397   // Check that the function is really a JavaScript function.
   4398   // r1: pushed function (to be verified)
   4399   __ JumpIfSmi(r1, &slow);
   4400   // Get the map of the function object.
   4401   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   4402   __ b(ne, &slow);
   4403 
   4404   // Fast-case: Invoke the function now.
   4405   // r1: pushed function
   4406   ParameterCount actual(argc_);
   4407   __ InvokeFunction(r1, actual, JUMP_FUNCTION);
   4408 
   4409   // Slow-case: Non-function called.
   4410   __ bind(&slow);
   4411   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   4412   // of the original receiver from the call site).
   4413   __ str(r1, MemOperand(sp, argc_ * kPointerSize));
   4414   __ mov(r0, Operand(argc_));  // Set up the number of arguments.
   4415   __ mov(r2, Operand(0, RelocInfo::NONE));
   4416   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
   4417   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
   4418           RelocInfo::CODE_TARGET);
   4419 }
   4420 
   4421 
   4422 // Unfortunately you have to run without snapshots to see most of these
   4423 // names in the profile since most compare stubs end up in the snapshot.
   4424 const char* CompareStub::GetName() {
   4425   ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
   4426          (lhs_.is(r1) && rhs_.is(r0)));
   4427 
   4428   if (name_ != NULL) return name_;
   4429   const int kMaxNameLength = 100;
   4430   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
   4431       kMaxNameLength);
   4432   if (name_ == NULL) return "OOM";
   4433 
   4434   const char* cc_name;
   4435   switch (cc_) {
   4436     case lt: cc_name = "LT"; break;
   4437     case gt: cc_name = "GT"; break;
   4438     case le: cc_name = "LE"; break;
   4439     case ge: cc_name = "GE"; break;
   4440     case eq: cc_name = "EQ"; break;
   4441     case ne: cc_name = "NE"; break;
   4442     default: cc_name = "UnknownCondition"; break;
   4443   }
   4444 
   4445   const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
   4446   const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
   4447 
   4448   const char* strict_name = "";
   4449   if (strict_ && (cc_ == eq || cc_ == ne)) {
   4450     strict_name = "_STRICT";
   4451   }
   4452 
   4453   const char* never_nan_nan_name = "";
   4454   if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
   4455     never_nan_nan_name = "_NO_NAN";
   4456   }
   4457 
   4458   const char* include_number_compare_name = "";
   4459   if (!include_number_compare_) {
   4460     include_number_compare_name = "_NO_NUMBER";
   4461   }
   4462 
   4463   const char* include_smi_compare_name = "";
   4464   if (!include_smi_compare_) {
   4465     include_smi_compare_name = "_NO_SMI";
   4466   }
   4467 
   4468   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
   4469                "CompareStub_%s%s%s%s%s%s%s",
   4470                cc_name,
   4471                lhs_name,
   4472                rhs_name,
   4473                strict_name,
   4474                never_nan_nan_name,
   4475                include_number_compare_name,
   4476                include_smi_compare_name);
   4477   return name_;
   4478 }
   4479 
   4480 
   4481 int CompareStub::MinorKey() {
   4482   // Encode the parameters in a unique 16 bit value. To avoid duplicate
   4483   // stubs the never NaN NaN condition is only taken into account if the
   4484   // condition is equals.
   4485   ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
   4486   ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
   4487          (lhs_.is(r1) && rhs_.is(r0)));
   4488   return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
   4489          | RegisterField::encode(lhs_.is(r0))
   4490          | StrictField::encode(strict_)
   4491          | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
   4492          | IncludeNumberCompareField::encode(include_number_compare_)
   4493          | IncludeSmiCompareField::encode(include_smi_compare_);
   4494 }
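        // Note: on ARM the condition code occupies the top four bits of an
        // instruction encoding, so static_cast<unsigned>(cc_) >> 28 extracts a
        // small integer (eq -> 0, ne -> 1, ...) that fits in ConditionField.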
   4495 
   4496 
   4497 // StringCharCodeAtGenerator
   4498 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   4499   Label flat_string;
   4500   Label ascii_string;
   4501   Label got_char_code;
   4502 
   4503   // If the receiver is a smi, trigger the non-string case.
   4504   __ JumpIfSmi(object_, receiver_not_string_);
   4505 
   4506   // Fetch the instance type of the receiver into result register.
   4507   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   4508   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   4509   // If the receiver is not a string, trigger the non-string case.
   4510   __ tst(result_, Operand(kIsNotStringMask));
   4511   __ b(ne, receiver_not_string_);
   4512 
   4513   // If the index is non-smi, trigger the non-smi case.
   4514   __ JumpIfNotSmi(index_, &index_not_smi_);
   4515 
   4516   // Put smi-tagged index into scratch register.
   4517   __ mov(scratch_, index_);
   4518   __ bind(&got_smi_index_);
   4519 
   4520   // Check for index out of range.
   4521   __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
   4522   __ cmp(ip, Operand(scratch_));
   4523   __ b(ls, index_out_of_range_);
   4524 
   4525   // We need special handling for non-flat strings.
   4526   STATIC_ASSERT(kSeqStringTag == 0);
   4527   __ tst(result_, Operand(kStringRepresentationMask));
   4528   __ b(eq, &flat_string);
   4529 
   4530   // Handle non-flat strings.
   4531   __ tst(result_, Operand(kIsConsStringMask));
   4532   __ b(eq, &call_runtime_);
   4533 
   4534   // ConsString.
   4535   // Check whether the right hand side is the empty string (i.e. if
   4536   // this is really a flat string in a cons string). If that is not
   4537   // the case, we would rather go to the runtime system now to flatten
   4538   // the string.
   4539   __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
   4540   __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
   4541   __ cmp(result_, Operand(ip));
   4542   __ b(ne, &call_runtime_);
   4543   // Get the first of the two strings and load its instance type.
   4544   __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
   4545   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   4546   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   4547   // If the first cons component is also non-flat, then go to runtime.
   4548   STATIC_ASSERT(kSeqStringTag == 0);
   4549   __ tst(result_, Operand(kStringRepresentationMask));
   4550   __ b(ne, &call_runtime_);
   4551 
   4552   // Check for 1-byte or 2-byte string.
   4553   __ bind(&flat_string);
   4554   STATIC_ASSERT(kAsciiStringTag != 0);
   4555   __ tst(result_, Operand(kStringEncodingMask));
   4556   __ b(ne, &ascii_string);
   4557 
   4558   // 2-byte string.
   4559   // Load the 2-byte character code into the result register. We can
   4560   // add without shifting since the smi tag size is the log2 of the
   4561   // number of bytes in a two-byte character.
   4562   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
   4563   __ add(scratch_, object_, Operand(scratch_));
   4564   __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
   4565   __ jmp(&got_char_code);
   4566 
   4567   // ASCII string.
   4568   // Load the byte into the result register.
   4569   __ bind(&ascii_string);
   4570   __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
   4571   __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
   4572 
   4573   __ bind(&got_char_code);
   4574   __ mov(result_, Operand(result_, LSL, kSmiTagSize));
   4575   __ bind(&exit_);
   4576 }
   4577 
   4578 
   4579 void StringCharCodeAtGenerator::GenerateSlow(
   4580     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   4581   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
   4582 
   4583   // Index is not a smi.
   4584   __ bind(&index_not_smi_);
   4585   // If index is a heap number, try converting it to an integer.
   4586   __ CheckMap(index_,
   4587               scratch_,
   4588               Heap::kHeapNumberMapRootIndex,
   4589               index_not_number_,
   4590               true);
   4591   call_helper.BeforeCall(masm);
   4592   __ Push(object_, index_);
   4593   __ push(index_);  // Consumed by runtime conversion function.
   4594   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
   4595     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   4596   } else {
   4597     ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
   4598     // NumberToSmi discards numbers that are not exact integers.
   4599     __ CallRuntime(Runtime::kNumberToSmi, 1);
   4600   }
   4601   // Save the conversion result before the pop instructions below
   4602   // have a chance to overwrite it.
   4603   __ Move(scratch_, r0);
   4604   __ pop(index_);
   4605   __ pop(object_);
   4606   // Reload the instance type.
   4607   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   4608   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   4609   call_helper.AfterCall(masm);
   4610   // If index is still not a smi, it must be out of range.
   4611   __ JumpIfNotSmi(scratch_, index_out_of_range_);
   4612   // Otherwise, return to the fast path.
   4613   __ jmp(&got_smi_index_);
   4614 
   4615   // Call runtime. We get here when the receiver is a string and the
   4616   // index is a number, but the code for getting the actual character
   4617   // is too complex (e.g., when the string needs to be flattened).
   4618   __ bind(&call_runtime_);
   4619   call_helper.BeforeCall(masm);
   4620   __ Push(object_, index_);
   4621   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   4622   __ Move(result_, r0);
   4623   call_helper.AfterCall(masm);
   4624   __ jmp(&exit_);
   4625 
   4626   __ Abort("Unexpected fallthrough from CharCodeAt slow case");
   4627 }
   4628 
   4629 
   4630 // -------------------------------------------------------------------------
   4631 // StringCharFromCodeGenerator
   4632 
   4633 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   4634   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   4635   STATIC_ASSERT(kSmiTag == 0);
   4636   STATIC_ASSERT(kSmiShiftSize == 0);
   4637   ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
   4638   __ tst(code_,
   4639          Operand(kSmiTagMask |
   4640                  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
   4641   __ b(ne, &slow_case_);
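          // Illustration: with kMaxAsciiCharCode == 0x7F the combined mask tests
          // the smi tag and every bit above the 7-bit payload at once; smi-tagged
          // 'A' (0x41 << 1 = 0x82) passes, while smi-tagged 0x100 does not.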
   4642 
   4643   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   4644   // At this point the code register contains a smi-tagged ASCII char code.
   4645   STATIC_ASSERT(kSmiTag == 0);
   4646   __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
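          // A smi char code is already code * 2, so the shift above scales it to
          // a byte offset: code 65 ('A') -> smi 130 -> 130 << 1 = 260, which is
          // 65 * kPointerSize on this 32-bit target.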
   4647   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   4648   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4649   __ cmp(result_, Operand(ip));
   4650   __ b(eq, &slow_case_);
   4651   __ bind(&exit_);
   4652 }
   4653 
   4654 
   4655 void StringCharFromCodeGenerator::GenerateSlow(
   4656     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   4657   __ Abort("Unexpected fallthrough to CharFromCode slow case");
   4658 
   4659   __ bind(&slow_case_);
   4660   call_helper.BeforeCall(masm);
   4661   __ push(code_);
   4662   __ CallRuntime(Runtime::kCharFromCode, 1);
   4663   __ Move(result_, r0);
   4664   call_helper.AfterCall(masm);
   4665   __ jmp(&exit_);
   4666 
   4667   __ Abort("Unexpected fallthrough from CharFromCode slow case");
   4668 }
   4669 
   4670 
   4671 // -------------------------------------------------------------------------
   4672 // StringCharAtGenerator
   4673 
   4674 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
   4675   char_code_at_generator_.GenerateFast(masm);
   4676   char_from_code_generator_.GenerateFast(masm);
   4677 }
   4678 
   4679 
   4680 void StringCharAtGenerator::GenerateSlow(
   4681     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   4682   char_code_at_generator_.GenerateSlow(masm, call_helper);
   4683   char_from_code_generator_.GenerateSlow(masm, call_helper);
   4684 }
   4685 
   4686 
   4687 class StringHelper : public AllStatic {
   4688  public:
   4689   // Generate code for copying characters using a simple loop. This should only
   4690   // be used in places where the number of characters is small and the
   4691   // additional setup and checking in GenerateCopyCharactersLong adds too much
   4692   // overhead. Copying of overlapping regions is not supported.
   4693   // Dest register ends at the position after the last character written.
   4694   static void GenerateCopyCharacters(MacroAssembler* masm,
   4695                                      Register dest,
   4696                                      Register src,
   4697                                      Register count,
   4698                                      Register scratch,
   4699                                      bool ascii);
   4700 
   4701   // Generate code for copying a large number of characters. This function
   4702   // is allowed to spend extra time setting up conditions to make copying
   4703   // faster. Copying of overlapping regions is not supported.
   4704   // Dest register ends at the position after the last character written.
   4705   static void GenerateCopyCharactersLong(MacroAssembler* masm,
   4706                                          Register dest,
   4707                                          Register src,
   4708                                          Register count,
   4709                                          Register scratch1,
   4710                                          Register scratch2,
   4711                                          Register scratch3,
   4712                                          Register scratch4,
   4713                                          Register scratch5,
   4714                                          int flags);
   4715 
   4716 
   4717   // Probe the symbol table for a two-character string. If the string is
   4718   // not found by probing, a jump to the label not_found is performed. This
   4719   // jump does not guarantee that the string is not in the symbol table. If
   4720   // the string is found, the code falls through with the string in register
   4721   // r0. The contents of both the c1 and c2 registers are modified. On exit,
   4722   // c1 is guaranteed to contain a halfword with its low and high bytes equal
   4723   // to the initial contents of c1 and c2 respectively.
   4724   static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
   4725                                                    Register c1,
   4726                                                    Register c2,
   4727                                                    Register scratch1,
   4728                                                    Register scratch2,
   4729                                                    Register scratch3,
   4730                                                    Register scratch4,
   4731                                                    Register scratch5,
   4732                                                    Label* not_found);
   4733 
   4734   // Generate string hash.
   4735   static void GenerateHashInit(MacroAssembler* masm,
   4736                                Register hash,
   4737                                Register character);
   4738 
   4739   static void GenerateHashAddCharacter(MacroAssembler* masm,
   4740                                        Register hash,
   4741                                        Register character);
   4742 
   4743   static void GenerateHashGetHash(MacroAssembler* masm,
   4744                                   Register hash);
   4745 
   4746  private:
   4747   DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
   4748 };
   4749 
   4750 
   4751 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
   4752                                           Register dest,
   4753                                           Register src,
   4754                                           Register count,
   4755                                           Register scratch,
   4756                                           bool ascii) {
   4757   Label loop;
   4758   Label done;
   4759   // This loop just copies one character at a time, as it is only used for very
   4760   // short strings.
   4761   if (!ascii) {
   4762     __ add(count, count, Operand(count), SetCC);
   4763   } else {
   4764     __ cmp(count, Operand(0, RelocInfo::NONE));
   4765   }
   4766   __ b(eq, &done);
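          // Note: for two-byte strings the add above doubles count (characters
          // to bytes) and sets the flags, so the eq branch also catches a
          // zero-length copy.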
   4767 
   4768   __ bind(&loop);
   4769   __ ldrb(scratch, MemOperand(src, 1, PostIndex));
   4770   // Perform the sub between the load and the dependent store to give the
   4771   // load time to complete.
   4772   __ sub(count, count, Operand(1), SetCC);
   4773   __ strb(scratch, MemOperand(dest, 1, PostIndex));
   4774   // Branch back unless this was the last iteration.
   4775   __ b(gt, &loop);
   4776 
   4777   __ bind(&done);
   4778 }
   4779 
   4780 
   4781 enum CopyCharactersFlags {
   4782   COPY_ASCII = 1,
   4783   DEST_ALWAYS_ALIGNED = 2
   4784 };
   4785 
   4786 
   4787 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
   4788                                               Register dest,
   4789                                               Register src,
   4790                                               Register count,
   4791                                               Register scratch1,
   4792                                               Register scratch2,
   4793                                               Register scratch3,
   4794                                               Register scratch4,
   4795                                               Register scratch5,
   4796                                               int flags) {
   4797   bool ascii = (flags & COPY_ASCII) != 0;
   4798   bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
   4799 
   4800   if (dest_always_aligned && FLAG_debug_code) {
   4801     // Check that destination is actually word aligned if the flag says
   4802     // that it is.
   4803     __ tst(dest, Operand(kPointerAlignmentMask));
   4804     __ Check(eq, "Destination of copy not aligned.");
   4805   }
   4806 
   4807   const int kReadAlignment = 4;
   4808   const int kReadAlignmentMask = kReadAlignment - 1;
   4809   // Ensure that reading an entire aligned word containing the last character
   4810   // of a string will not read outside the allocated area (because we pad up
   4811   // to kObjectAlignment).
   4812   STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
   4813   // Assumes word reads and writes are little endian.
   4814   // Nothing to do for zero characters.
   4815   Label done;
   4816   if (!ascii) {
   4817     __ add(count, count, Operand(count), SetCC);
   4818   } else {
   4819     __ cmp(count, Operand(0, RelocInfo::NONE));
   4820   }
   4821   __ b(eq, &done);
   4822 
   4823   // Assume that you cannot read (or write) unaligned.
   4824   Label byte_loop;
   4825   // Must copy at least eight bytes, otherwise just do it one byte at a time.
   4826   __ cmp(count, Operand(8));
   4827   __ add(count, dest, Operand(count));
   4828   Register limit = count;  // Read until src equals this.
   4829   __ b(lt, &byte_loop);
   4830 
   4831   if (!dest_always_aligned) {
   4832     // Align dest by byte copying. Copies between zero and three bytes.
   4833     __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
   4834     Label dest_aligned;
   4835     __ b(eq, &dest_aligned);
   4836     __ cmp(scratch4, Operand(2));
   4837     __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
   4838     __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
   4839     __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
   4840     __ strb(scratch1, MemOperand(dest, 1, PostIndex));
   4841     __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
   4842     __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
   4843     __ bind(&dest_aligned);
   4844   }
   4845 
   4846   Label simple_loop;
   4847 
   4848   __ sub(scratch4, dest, Operand(src));
   4849   __ and_(scratch4, scratch4, Operand(0x03), SetCC);
   4850   __ b(eq, &simple_loop);
   4851   // The shift register holds the number of bits in a source word that
   4852   // must be combined with bits in the next source word in order
   4853   // to create a destination word.
   4854 
   4855   // Complex loop for src/dst that are not aligned the same way.
   4856   {
   4857     Label loop;
   4858     __ mov(scratch4, Operand(scratch4, LSL, 3));
   4859     Register left_shift = scratch4;
   4860     __ and_(src, src, Operand(~3));  // Round down to load previous word.
   4861     __ ldr(scratch1, MemOperand(src, 4, PostIndex));
   4862     // Store the "shift" most significant bits of scratch in the least
   4863     // significant bits (i.e., shift down by (32 - shift)).
   4864     __ rsb(scratch2, left_shift, Operand(32));
   4865     Register right_shift = scratch2;
   4866     __ mov(scratch1, Operand(scratch1, LSR, right_shift));
   4867 
   4868     __ bind(&loop);
   4869     __ ldr(scratch3, MemOperand(src, 4, PostIndex));
   4870     __ sub(scratch5, limit, Operand(dest));
   4871     __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
   4872     __ str(scratch1, MemOperand(dest, 4, PostIndex));
   4873     __ mov(scratch1, Operand(scratch3, LSR, right_shift));
   4874     // Loop if four or more bytes left to copy.
   4875     // Compare to eight, because we did the subtract before increasing dst.
   4876     __ sub(scratch5, scratch5, Operand(8), SetCC);
   4877     __ b(ge, &loop);
   4878   }
   4879   // There are now between zero and three bytes left to copy (scratch5 holds
   4880   // that number negated), and between one and three bytes already read into
   4881   // scratch1 (eight times that number in scratch4). We may have read past
   4882   // the end of the string, but because objects are aligned, we have not read
   4883   // past the end of the object.
   4884   // Find the minimum of remaining characters to move and preloaded characters
   4885   // and write those as bytes.
   4886   __ add(scratch5, scratch5, Operand(4), SetCC);
   4887   __ b(eq, &done);
   4888   __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
   4889   // Move minimum of bytes read and bytes left to copy to scratch5.
   4890   __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
   4891   // Between one and three (value in scratch5) characters already read into
   4892   // scratch1 ready to write.
   4893   __ cmp(scratch5, Operand(2));
   4894   __ strb(scratch1, MemOperand(dest, 1, PostIndex));
   4895   __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
   4896   __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
   4897   __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
   4898   __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
   4899   // Copy any remaining bytes.
   4900   __ b(&byte_loop);
   4901 
   4902   // Simple loop.
   4903   // Copy words from src to dst, until less than four bytes left.
   4904   // Both src and dest are word aligned.
   4905   __ bind(&simple_loop);
   4906   {
   4907     Label loop;
   4908     __ bind(&loop);
   4909     __ ldr(scratch1, MemOperand(src, 4, PostIndex));
   4910     __ sub(scratch3, limit, Operand(dest));
   4911     __ str(scratch1, MemOperand(dest, 4, PostIndex));
   4912     // Compare to 8, not 4, because we do the subtraction before increasing
   4913     // dest.
   4914     __ cmp(scratch3, Operand(8));
   4915     __ b(ge, &loop);
   4916   }
   4917 
   4918   // Copy bytes from src to dst until dst hits limit.
   4919   __ bind(&byte_loop);
   4920   __ cmp(dest, Operand(limit));
   4921   __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
   4922   __ b(ge, &done);
   4923   __ strb(scratch1, MemOperand(dest, 1, PostIndex));
   4924   __ b(&byte_loop);
   4925 
   4926   __ bind(&done);
   4927 }
   4928 
   4929 
   4930 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
   4931                                                         Register c1,
   4932                                                         Register c2,
   4933                                                         Register scratch1,
   4934                                                         Register scratch2,
   4935                                                         Register scratch3,
   4936                                                         Register scratch4,
   4937                                                         Register scratch5,
   4938                                                         Label* not_found) {
   4939   // Register scratch3 is the general scratch register in this function.
   4940   Register scratch = scratch3;
   4941 
   4942   // Make sure that both characters are not digits, as such strings have a
   4943   // different hash algorithm. Don't try to look for these in the symbol table.
   4944   Label not_array_index;
   4945   __ sub(scratch, c1, Operand(static_cast<int>('0')));
   4946   __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
   4947   __ b(hi, &not_array_index);
   4948   __ sub(scratch, c2, Operand(static_cast<int>('0')));
   4949   __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
   4950 
   4951   // If the check failed, combine both characters into a single halfword.
   4952   // This is required by the contract of the method: code at the
   4953   // not_found branch expects this combination in the c1 register.
   4954   __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
   4955   __ b(ls, not_found);
   4956 
   4957   __ bind(&not_array_index);
   4958   // Calculate the two character string hash.
   4959   Register hash = scratch1;
   4960   StringHelper::GenerateHashInit(masm, hash, c1);
   4961   StringHelper::GenerateHashAddCharacter(masm, hash, c2);
   4962   StringHelper::GenerateHashGetHash(masm, hash);
   4963 
   4964   // Collect the two characters in a register.
   4965   Register chars = c1;
   4966   __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
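          // E.g. c1 = 'a' (0x61) and c2 = 'b' (0x62) combine to 0x6261, matching
          // the little-endian halfword comparison performed below.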
   4967 
   4968   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
   4969   // hash:  hash of two character string.
   4970 
   4971   // Load the symbol table. The address of its first element is
   4972   // calculated below into first_symbol_table_element.
   4973   Register symbol_table = c2;
   4974   __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
   4975 
   4976   Register undefined = scratch4;
   4977   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   4978 
   4979   // Calculate capacity mask from the symbol table capacity.
   4980   Register mask = scratch2;
   4981   __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
   4982   __ mov(mask, Operand(mask, ASR, 1));
   4983   __ sub(mask, mask, Operand(1));
   4984 
   4985   // Calculate untagged address of the first element of the symbol table.
   4986   Register first_symbol_table_element = symbol_table;
   4987   __ add(first_symbol_table_element, symbol_table,
   4988          Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
   4989 
   4990   // Registers
   4991   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
   4992   // hash:  hash of two character string
   4993   // mask:  capacity mask
   4994   // first_symbol_table_element: address of the first element of
   4995   //                             the symbol table
   4996   // undefined: the undefined object
   4997   // scratch: -
   4998 
   4999   // Perform a number of probes in the symbol table.
   5000   static const int kProbes = 4;
   5001   Label found_in_symbol_table;
   5002   Label next_probe[kProbes];
   5003   for (int i = 0; i < kProbes; i++) {
   5004     Register candidate = scratch5;  // Scratch register contains candidate.
   5005 
   5006     // Calculate entry in symbol table.
   5007     if (i > 0) {
   5008       __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
   5009     } else {
   5010       __ mov(candidate, hash);
   5011     }
   5012 
   5013     __ and_(candidate, candidate, Operand(mask));
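            // Each probe i > 0 thus inspects entry (hash + GetProbeOffset(i)) & mask;
            // the mask keeps the index inside the power-of-two table capacity.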
   5014 
   5015     // Load the entry from the symbol table.
   5016     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
   5017     __ ldr(candidate,
   5018            MemOperand(first_symbol_table_element,
   5019                       candidate,
   5020                       LSL,
   5021                       kPointerSizeLog2));
   5022 
   5023     // If entry is undefined no string with this hash can be found.
   5024     Label is_string;
   5025     __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
   5026     __ b(ne, &is_string);
   5027 
   5028     __ cmp(undefined, candidate);
   5029     __ b(eq, not_found);
   5030     // Must be null (deleted entry).
   5031     if (FLAG_debug_code) {
   5032       __ LoadRoot(ip, Heap::kNullValueRootIndex);
   5033       __ cmp(ip, candidate);
   5034       __ Assert(eq, "oddball in symbol table is not undefined or null");
   5035     }
   5036     __ jmp(&next_probe[i]);
   5037 
   5038     __ bind(&is_string);
   5039 
   5040     // Check that the candidate is a non-external ASCII string.  The instance
   5041     // type is still in the scratch register from the CompareObjectType
   5042     // operation.
   5043     __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
   5044 
   5045     // If length is not 2 the string is not a candidate.
   5046     __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
   5047     __ cmp(scratch, Operand(Smi::FromInt(2)));
   5048     __ b(ne, &next_probe[i]);
   5049 
   5050     // Check if the two characters match.
   5051     // Assumes that word load is little endian.
   5052     __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
   5053     __ cmp(chars, scratch);
   5054     __ b(eq, &found_in_symbol_table);
   5055     __ bind(&next_probe[i]);
   5056   }
   5057 
   5058   // No matching 2 character string found by probing.
   5059   __ jmp(not_found);
   5060 
   5061   // Scratch register contains result when we fall through to here.
   5062   Register result = scratch;
   5063   __ bind(&found_in_symbol_table);
   5064   __ Move(r0, result);
   5065 }
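
        // For reference, the probe loop above corresponds roughly to the
        // following C-like sketch. This is illustrative only: the names are
        // invented here, and the real SymbolTable layout and probe offsets
        // are defined elsewhere in the runtime.
        //
        //   for (int i = 0; i < kProbes; i++) {
        //     int entry = (hash + SymbolTable::GetProbeOffset(i)) & mask;
        //     Object* candidate = table[entry];
        //     if (candidate == undefined) return not_found;  // Empty slot.
        //     if (candidate is a sequential ASCII string of length 2 whose
        //         two characters equal chars) return candidate;
        //     // Otherwise (deleted entry or mismatch) try the next probe.
        //   }
        //   return not_found;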
   5066 
   5067 
   5068 void StringHelper::GenerateHashInit(MacroAssembler* masm,
   5069                                     Register hash,
   5070                                     Register character) {
   5071   // hash = character + (character << 10);
   5072   __ add(hash, character, Operand(character, LSL, 10));
   5073   // hash ^= hash >> 6;
   5074   __ eor(hash, hash, Operand(hash, ASR, 6));
   5075 }
   5076 
   5077 
   5078 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
   5079                                             Register hash,
   5080                                             Register character) {
   5081   // hash += character;
   5082   __ add(hash, hash, Operand(character));
   5083   // hash += hash << 10;
   5084   __ add(hash, hash, Operand(hash, LSL, 10));
   5085   // hash ^= hash >> 6;
   5086   __ eor(hash, hash, Operand(hash, ASR, 6));
   5087 }
   5088 
   5089 
   5090 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
   5091                                        Register hash) {
   5092   // hash += hash << 3;
   5093   __ add(hash, hash, Operand(hash, LSL, 3));
   5094   // hash ^= hash >> 11;
   5095   __ eor(hash, hash, Operand(hash, ASR, 11));
   5096   // hash += hash << 15;
   5097   __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
   5098 
   5099   // if (hash == 0) hash = 27;
   5100   __ mov(hash, Operand(27), LeaveCC, eq);
   5101 }
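
        // Taken together, GenerateHashInit, GenerateHashAddCharacter and
        // GenerateHashGetHash compute the running hash spelled out by the
        // per-instruction comments above; as a C-like sketch (illustrative
        // only, shift semantics simplified):
        //
        //   hash = 0;
        //   for each character c:
        //     hash += c;
        //     hash += hash << 10;
        //     hash ^= hash >> 6;
        //   hash += hash << 3;
        //   hash ^= hash >> 11;
        //   hash += hash << 15;
        //   if (hash == 0) hash = 27;  // 0 is reserved, presumably as the
        //                              // "not yet hashed" sentinel.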
   5102 
   5103 
   5104 void SubStringStub::Generate(MacroAssembler* masm) {
   5105   Label runtime;
   5106 
   5107   // Stack frame on entry.
   5108   //  lr: return address
   5109   //  sp[0]: to
   5110   //  sp[4]: from
   5111   //  sp[8]: string
   5112 
   5113   // This stub is called from the native-call %_SubString(...), so
   5114   // nothing can be assumed about the arguments. The stub checks that:
   5115   //  "string" is a sequential string,
   5116   //  both "from" and "to" are smis, and
   5117   //  0 <= from <= to <= string.length.
   5118   // If any of these checks fails, we call the runtime system.
   5119 
   5120   static const int kToOffset = 0 * kPointerSize;
   5121   static const int kFromOffset = 1 * kPointerSize;
   5122   static const int kStringOffset = 2 * kPointerSize;
   5123 
   5124   // Check bounds and smi-ness.
   5125   Register to = r6;
   5126   Register from = r7;
   5127   __ Ldrd(to, from, MemOperand(sp, kToOffset));
   5128   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   5129   STATIC_ASSERT(kSmiTag == 0);
   5130   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   5131   // I.e., an arithmetic shift right by one un-smi-tags the value.
   5132   __ mov(r2, Operand(to, ASR, 1), SetCC);
   5133   __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
   5134   // If either to or from had the smi tag bit set, then carry is set now.
   5135   __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
   5136   __ b(mi, &runtime);  // From is negative.
   5137 
   5138   // Both to and from are smis.
   5139 
   5140   __ sub(r2, r2, Operand(r3), SetCC);
   5141   __ b(mi, &runtime);  // Fail if from > to.
   5142   // Special handling of sub-strings of length 1 and 2. One character strings
   5143   // are handled in the runtime system (looked up in the single character
   5144   // cache). Two character strings are looked up in the symbol table.
   5145   __ cmp(r2, Operand(2));
   5146   __ b(lt, &runtime);
   5147 
   5148   // r2: length
   5149   // r3: from index (untagged smi)
   5150   // r6 (a.k.a. to): to (smi)
   5151   // r7 (a.k.a. from): from offset (smi)
   5152 
   5153   // Make sure first argument is a sequential (or flat) string.
   5154   __ ldr(r5, MemOperand(sp, kStringOffset));
   5155   STATIC_ASSERT(kSmiTag == 0);
   5156   __ tst(r5, Operand(kSmiTagMask));
   5157   __ b(eq, &runtime);
   5158   Condition is_string = masm->IsObjectStringType(r5, r1);
   5159   __ b(NegateCondition(is_string), &runtime);
   5160 
   5161   // r1: instance type
   5162   // r2: length
   5163   // r3: from index (untagged smi)
   5164   // r5: string
   5165   // r6 (a.k.a. to): to (smi)
   5166   // r7 (a.k.a. from): from offset (smi)
   5167   Label seq_string;
   5168   __ and_(r4, r1, Operand(kStringRepresentationMask));
   5169   STATIC_ASSERT(kSeqStringTag < kConsStringTag);
   5170   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   5171   __ cmp(r4, Operand(kConsStringTag));
   5172   __ b(gt, &runtime);  // External strings go to runtime.
   5173   __ b(lt, &seq_string);  // Sequential strings are handled directly.
   5174 
   5175   // Cons string. Try to recurse (once) on the first substring.
   5176   // (This adds a little more generality than necessary to handle flattened
   5177   // cons strings, but not much).
   5178   __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
   5179   __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
   5180   __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   5181   __ tst(r1, Operand(kStringRepresentationMask));
   5182   STATIC_ASSERT(kSeqStringTag == 0);
   5183   __ b(ne, &runtime);  // Cons and External strings go to runtime.
   5184 
   5185   // Definitely a sequential string.
   5186   __ bind(&seq_string);
   5187 
   5188   // r1: instance type.
   5189   // r2: length
   5190   // r3: from index (untagged smi)
   5191   // r5: string
   5192   // r6 (a.k.a. to): to (smi)
   5193   // r7 (a.k.a. from): from offset (smi)
   5194   __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
   5195   __ cmp(r4, Operand(to));
   5196   __ b(lt, &runtime);  // Fail if to > length.
   5197   to = no_reg;
   5198 
   5199   // r1: instance type.
   5200   // r2: result string length.
   5201   // r3: from index (untagged smi)
   5202   // r5: string.
   5203   // r7 (a.k.a. from): from offset (smi)
   5204   // Check for flat ASCII string.
   5205   Label non_ascii_flat;
   5206   __ tst(r1, Operand(kStringEncodingMask));
   5207   STATIC_ASSERT(kTwoByteStringTag == 0);
   5208   __ b(eq, &non_ascii_flat);
   5209 
   5210   Label result_longer_than_two;
   5211   __ cmp(r2, Operand(2));
   5212   __ b(gt, &result_longer_than_two);
   5213 
   5214   // Sub string of length 2 requested.
   5215   // Get the two characters forming the sub string.
   5216   __ add(r5, r5, Operand(r3));
   5217   __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
   5218   __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
   5219 
   5220   // Try to lookup two character string in symbol table.
   5221   Label make_two_character_string;
   5222   StringHelper::GenerateTwoCharacterSymbolTableProbe(
   5223       masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
   5224   Counters* counters = masm->isolate()->counters();
   5225   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   5226   __ add(sp, sp, Operand(3 * kPointerSize));
   5227   __ Ret();
   5228 
   5229   // r2: result string length.
   5230   // r3: two characters combined into halfword in little endian byte order.
   5231   __ bind(&make_two_character_string);
   5232   __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
   5233   __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
   5234   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   5235   __ add(sp, sp, Operand(3 * kPointerSize));
   5236   __ Ret();
   5237 
   5238   __ bind(&result_longer_than_two);
   5239 
   5240   // Allocate the result.
   5241   __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
   5242 
   5243   // r0: result string.
   5244   // r2: result string length.
   5245   // r5: string.
   5246   // r7 (a.k.a. from): from offset (smi)
   5247   // Locate first character of result.
   5248   __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   5249   // Locate 'from' character of string.
   5250   __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   5251   __ add(r5, r5, Operand(from, ASR, 1));
   5252 
   5253   // r0: result string.
   5254   // r1: first character of result string.
   5255   // r2: result string length.
   5256   // r5: first character of sub string to copy.
   5257   STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   5258   StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
   5259                                            COPY_ASCII | DEST_ALWAYS_ALIGNED);
   5260   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   5261   __ add(sp, sp, Operand(3 * kPointerSize));
   5262   __ Ret();
   5263 
   5264   __ bind(&non_ascii_flat);
   5265   // r2: result string length.
   5266   // r5: string.
   5267   // r7 (a.k.a. from): from offset (smi)
   5268   // Check for flat two byte string.
   5269 
   5270   // Allocate the result.
   5271   __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
   5272 
   5273   // r0: result string.
   5274   // r2: result string length.
   5275   // r5: string.
   5276   // Locate first character of result.
   5277   __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   5278   // Locate 'from' character of string.
   5279   __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   5280   // As "from" is a smi it is 2 times the value which matches the size of a two
   5281   // byte character.
   5282   __ add(r5, r5, Operand(from));
   5283   from = no_reg;
   5284 
   5285   // r0: result string.
   5286   // r1: first character of result.
   5287   // r2: result length.
   5288   // r5: first character of string to copy.
   5289   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   5290   StringHelper::GenerateCopyCharactersLong(
   5291       masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
   5292   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   5293   __ add(sp, sp, Operand(3 * kPointerSize));
   5294   __ Ret();
   5295 
   5296   // Just jump to runtime to create the sub string.
   5297   __ bind(&runtime);
   5298   __ TailCallRuntime(Runtime::kSubString, 3, 1);
   5299 }
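
        // In outline, the stub above implements the following (an
        // illustrative sketch, not the exact control flow):
        //
        //   if (!from.IsSmi() || !to.IsSmi() || from < 0 || to < from) runtime;
        //   length = to - from;
        //   if (length < 2) runtime;  // Lengths 0 and 1 use runtime caches.
        //   if (string is not sequential, after at most one hop through a
        //       cons string's first component, or to > string.length) runtime;
        //   if (length == 2) probe the two-character symbol table first;
        //   allocate a sequential (ASCII or two-byte) result and copy the
        //   characters.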
   5300 
   5301 
   5302 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
   5303                                                         Register left,
   5304                                                         Register right,
   5305                                                         Register scratch1,
   5306                                                         Register scratch2,
   5307                                                         Register scratch3,
   5308                                                         Register scratch4) {
   5309   Label compare_lengths;
   5310   // Find minimum length and length difference.
   5311   __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
   5312   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
   5313   __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
   5314   Register length_delta = scratch3;
   5315   __ mov(scratch1, scratch2, LeaveCC, gt);
   5316   Register min_length = scratch1;
   5317   STATIC_ASSERT(kSmiTag == 0);
   5318   __ tst(min_length, Operand(min_length));
   5319   __ b(eq, &compare_lengths);
   5320 
   5321   // Untag smi.
   5322   __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
   5323 
   5324   // Set up registers so that we only need to increment one register
   5325   // in the loop.
   5326   __ add(scratch2, min_length,
   5327          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   5328   __ add(left, left, Operand(scratch2));
   5329   __ add(right, right, Operand(scratch2));
   5330   // Registers left and right now point to the character at index min_length.
   5331   __ rsb(min_length, min_length, Operand(-1));
   5332   Register index = min_length;
   5333   // Index is pre-incremented in the loop below; the first character read
          // is at index -min_length.
   5334 
   5335   {
   5336     // Compare loop.
   5337     Label loop;
   5338     __ bind(&loop);
   5339     // Compare characters.
   5340     __ add(index, index, Operand(1), SetCC);
   5341     __ ldrb(scratch2, MemOperand(left, index), ne);
   5342     __ ldrb(scratch4, MemOperand(right, index), ne);
   5343     // Skip to compare lengths with eq condition true.
   5344     __ b(eq, &compare_lengths);
   5345     __ cmp(scratch2, scratch4);
   5346     __ b(eq, &loop);
   5347     // Fallthrough with eq condition false.
   5348   }
   5349   // Compare lengths - strings up to min_length are equal.
   5350   __ bind(&compare_lengths);
   5351   ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   5352   // Strings equal up to min_length: use the (smi) length difference as result.
   5353   __ mov(r0, Operand(length_delta), SetCC, eq);
   5354   // Fall through to here if characters compare not-equal.
   5355   __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
   5356   __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
   5357   __ Ret();
   5358 }
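
        // Roughly, the stub above implements this comparison (a C-like
        // sketch, illustrative only):
        //
        //   int min_length = Min(left->length(), right->length());
        //   for (int i = 0; i < min_length; i++) {
        //     if (left[i] != right[i])
        //       return left[i] < right[i] ? LESS : GREATER;
        //   }
        //   if (left->length() == right->length()) return EQUAL;
        //   return left->length() < right->length() ? LESS : GREATER;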
   5359 
   5360 
   5361 void StringCompareStub::Generate(MacroAssembler* masm) {
   5362   Label runtime;
   5363 
   5364   Counters* counters = masm->isolate()->counters();
   5365 
   5366   // Stack frame on entry.
   5367   //  sp[0]: right string
   5368   //  sp[4]: left string
   5369   __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1.
   5370 
   5371   Label not_same;
   5372   __ cmp(r0, r1);
   5373   __ b(ne, &not_same);
   5374   STATIC_ASSERT(EQUAL == 0);
   5375   STATIC_ASSERT(kSmiTag == 0);
   5376   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   5377   __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
   5378   __ add(sp, sp, Operand(2 * kPointerSize));
   5379   __ Ret();
   5380 
   5381   __ bind(&not_same);
   5382 
   5383   // Check that both objects are sequential ASCII strings.
   5384   __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
   5385 
   5386   // Compare flat ASCII strings natively. Remove arguments from stack first.
   5387   __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
   5388   __ add(sp, sp, Operand(2 * kPointerSize));
   5389   GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
   5390 
   5391   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   5392   // tagged as a small integer.
   5393   __ bind(&runtime);
   5394   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   5395 }
   5396 
   5397 
   5398 void StringAddStub::Generate(MacroAssembler* masm) {
   5399   Label string_add_runtime, call_builtin;
   5400   Builtins::JavaScript builtin_id = Builtins::ADD;
   5401 
   5402   Counters* counters = masm->isolate()->counters();
   5403 
   5404   // Stack on entry:
   5405   // sp[0]: second argument (right).
   5406   // sp[4]: first argument (left).
   5407 
   5408   // Load the two arguments.
   5409   __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
   5410   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
   5411 
   5412   // Make sure that both arguments are strings if not known in advance.
   5413   if (flags_ == NO_STRING_ADD_FLAGS) {
   5414     __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
   5415     // Load instance types.
   5416     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
   5417     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
   5418     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   5419     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   5420     STATIC_ASSERT(kStringTag == 0);
   5421     // If either is not a string, go to runtime.
   5422     __ tst(r4, Operand(kIsNotStringMask));
   5423     __ tst(r5, Operand(kIsNotStringMask), eq);
   5424     __ b(ne, &string_add_runtime);
   5425   } else {
   5426     // Here at least one of the arguments is definitely a string.
   5427     // We convert the one that is not known to be a string.
   5428     if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
   5429       ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
   5430       GenerateConvertArgument(
   5431           masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
   5432       builtin_id = Builtins::STRING_ADD_RIGHT;
   5433     } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
   5434       ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
   5435       GenerateConvertArgument(
   5436           masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
   5437       builtin_id = Builtins::STRING_ADD_LEFT;
   5438     }
   5439   }
   5440 
   5441   // Both arguments are strings.
   5442   // r0: first string
   5443   // r1: second string
   5444   // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   5445   // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   5446   {
   5447     Label strings_not_empty;
   5448     // Check if either of the strings is empty. In that case return the other.
   5449     __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
   5450     __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
   5451     STATIC_ASSERT(kSmiTag == 0);
   5452     __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
   5453     __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
   5454     STATIC_ASSERT(kSmiTag == 0);
   5455     // Else test if the second string is empty.
   5456     __ cmp(r3, Operand(Smi::FromInt(0)), ne);
   5457     __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
   5458 
   5459     __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   5460     __ add(sp, sp, Operand(2 * kPointerSize));
   5461     __ Ret();
   5462 
   5463     __ bind(&strings_not_empty);
   5464   }
   5465 
   5466   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
   5467   __ mov(r3, Operand(r3, ASR, kSmiTagSize));
   5468   // Both strings are non-empty.
   5469   // r0: first string
   5470   // r1: second string
   5471   // r2: length of first string
   5472   // r3: length of second string
   5473   // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   5474   // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   5475   // Look at the length of the result of adding the two strings.
   5476   Label string_add_flat_result, longer_than_two;
   5477   // Adding two lengths can't overflow.
   5478   STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
   5479   __ add(r6, r2, Operand(r3));
   5480   // Use the symbol table when adding two one character strings, as
   5481   // returning a symbol here helps later optimizations.
   5482   __ cmp(r6, Operand(2));
   5483   __ b(ne, &longer_than_two);
   5484 
   5485   // Check that both strings are non-external ASCII strings.
   5486   if (flags_ != NO_STRING_ADD_FLAGS) {
   5487     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
   5488     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
   5489     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   5490     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   5491   }
   5492   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
   5493                                                   &string_add_runtime);
   5494 
   5495   // Get the two characters forming the sub string.
   5496   __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
   5497   __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
   5498 
   5499   // Try to lookup two character string in symbol table. If it is not found
   5500   // just allocate a new one.
   5501   Label make_two_character_string;
   5502   StringHelper::GenerateTwoCharacterSymbolTableProbe(
   5503       masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
   5504   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   5505   __ add(sp, sp, Operand(2 * kPointerSize));
   5506   __ Ret();
   5507 
   5508   __ bind(&make_two_character_string);
   5509   // The resulting string has length 2 and the first characters of the
   5510   // two strings are combined into a single halfword in register r2.
   5511   // We can therefore fill the resulting string with a single halfword
   5512   // store instruction (which assumes that the processor is in
   5513   // little-endian mode).
   5514   __ mov(r6, Operand(2));
   5515   __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
   5516   __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
   5517   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   5518   __ add(sp, sp, Operand(2 * kPointerSize));
   5519   __ Ret();
   5520 
   5521   __ bind(&longer_than_two);
   5522   // Check if resulting string will be flat.
   5523   __ cmp(r6, Operand(String::kMinNonFlatLength));
   5524   __ b(lt, &string_add_flat_result);
   5525   // Handle exceptionally long strings in the runtime system.
   5526   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   5527   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   5528   // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
   5529   __ cmp(r6, Operand(String::kMaxLength + 1));
   5530   __ b(hs, &string_add_runtime);
   5531 
   5532   // If result is not supposed to be flat, allocate a cons string object.
   5533   // If both strings are ASCII the result is an ASCII cons string.
   5534   if (flags_ != NO_STRING_ADD_FLAGS) {
   5535     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
   5536     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
   5537     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   5538     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   5539   }
   5540   Label non_ascii, allocated, ascii_data;
   5541   STATIC_ASSERT(kTwoByteStringTag == 0);
   5542   __ tst(r4, Operand(kStringEncodingMask));
   5543   __ tst(r5, Operand(kStringEncodingMask), ne);
   5544   __ b(eq, &non_ascii);
   5545 
   5546   // Allocate an ASCII cons string.
   5547   __ bind(&ascii_data);
   5548   __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
   5549   __ bind(&allocated);
   5550   // Fill the fields of the cons string.
   5551   __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
   5552   __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
   5553   __ mov(r0, Operand(r7));
   5554   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   5555   __ add(sp, sp, Operand(2 * kPointerSize));
   5556   __ Ret();
   5557 
   5558   __ bind(&non_ascii);
   5559   // At least one of the strings is two-byte. Check whether it happens
   5560   // to contain only ASCII characters.
   5561   // r4: first instance type.
   5562   // r5: second instance type.
   5563   __ tst(r4, Operand(kAsciiDataHintMask));
   5564   __ tst(r5, Operand(kAsciiDataHintMask), ne);
   5565   __ b(ne, &ascii_data);
   5566   __ eor(r4, r4, Operand(r5));
   5567   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   5568   __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
   5569   __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
   5570   __ b(eq, &ascii_data);
   5571 
   5572   // Allocate a two byte cons string.
   5573   __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
   5574   __ jmp(&allocated);
   5575 
   5576   // Handle creating a flat result. First check that both strings are
   5577   // sequential and that they have the same encoding.
   5578   // r0: first string
   5579   // r1: second string
   5580   // r2: length of first string
   5581   // r3: length of second string
   5582   // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   5583   // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   5584   // r6: sum of lengths.
   5585   __ bind(&string_add_flat_result);
   5586   if (flags_ != NO_STRING_ADD_FLAGS) {
   5587     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
   5588     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
   5589     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   5590     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   5591   }
   5592   // Check that both strings are sequential.
   5593   STATIC_ASSERT(kSeqStringTag == 0);
   5594   __ tst(r4, Operand(kStringRepresentationMask));
   5595   __ tst(r5, Operand(kStringRepresentationMask), eq);
   5596   __ b(ne, &string_add_runtime);
   5597   // Now check if both strings have the same encoding (ASCII/Two-byte).
   5598   // r0: first string.
   5599   // r1: second string.
   5600   // r2: length of first string.
   5601   // r3: length of second string.
   5602   // r6: sum of lengths.
   5603   Label non_ascii_string_add_flat_result;
   5604   ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
   5605   __ eor(r7, r4, Operand(r5));
   5606   __ tst(r7, Operand(kStringEncodingMask));
   5607   __ b(ne, &string_add_runtime);
   5608   // And see if it's ASCII or two-byte.
   5609   __ tst(r4, Operand(kStringEncodingMask));
   5610   __ b(eq, &non_ascii_string_add_flat_result);
   5611 
   5612   // Both strings are sequential ASCII strings. We also know that they are
   5613   // short (since the sum of the lengths is less than kMinNonFlatLength).
   5614   // r6: length of resulting flat string
   5615   __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
   5616   // Locate first character of result.
   5617   __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   5618   // Locate first character of first argument.
   5619   __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   5620   // r0: first character of first string.
   5621   // r1: second string.
   5622   // r2: length of first string.
   5623   // r3: length of second string.
   5624   // r6: first character of result.
   5625   // r7: result string.
   5626   StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
   5627 
   5628   // Load second argument and locate first character.
   5629   __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   5630   // r1: first character of second string.
   5631   // r3: length of second string.
   5632   // r6: next character of result.
   5633   // r7: result string.
   5634   StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
   5635   __ mov(r0, Operand(r7));
   5636   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   5637   __ add(sp, sp, Operand(2 * kPointerSize));
   5638   __ Ret();
   5639 
   5640   __ bind(&non_ascii_string_add_flat_result);
   5641   // Both strings are sequential two byte strings.
   5642   // r0: first string.
   5643   // r1: second string.
   5644   // r2: length of first string.
   5645   // r3: length of second string.
   5646   // r6: sum of length of strings.
   5647   __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
   5648   // r0: first string.
   5649   // r1: second string.
   5650   // r2: length of first string.
   5651   // r3: length of second string.
   5652   // r7: result string.
   5653 
   5654   // Locate first character of result.
   5655   __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   5656   // Locate first character of first argument.
   5657   __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   5658 
   5659   // r0: first character of first string.
   5660   // r1: second string.
   5661   // r2: length of first string.
   5662   // r3: length of second string.
   5663   // r6: first character of result.
   5664   // r7: result string.
   5665   StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
   5666 
   5667   // Locate first character of second argument.
   5668   __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   5669 
   5670   // r1: first character of second string.
   5671   // r3: length of second string.
   5672   // r6: next character of result (after copy of first string).
   5673   // r7: result string.
   5674   StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
   5675 
   5676   __ mov(r0, Operand(r7));
   5677   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   5678   __ add(sp, sp, Operand(2 * kPointerSize));
   5679   __ Ret();
   5680 
   5681   // Just jump to runtime to add the two strings.
   5682   __ bind(&string_add_runtime);
   5683   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
   5684 
   5685   if (call_builtin.is_linked()) {
   5686     __ bind(&call_builtin);
   5687     __ InvokeBuiltin(builtin_id, JUMP_JS);
   5688   }
   5689 }
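
        // Summary of the fast paths above (illustrative):
        //   if (left is empty) return right; if (right is empty) return left;
        //   if (combined length == 2) probe the two-character symbol table
        //       (allocating a fresh two-character string on a miss);
        //   if (combined length < String::kMinNonFlatLength) allocate a flat
        //       sequential result and copy both strings;
        //   else (up to String::kMaxLength) allocate a ConsString pointing at
        //       the two operands;
        //   anything unexpected (non-strings, external strings, overlong
        //   results) is handed to the runtime or to the ADD builtin.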
   5690 
   5691 
   5692 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
   5693                                             int stack_offset,
   5694                                             Register arg,
   5695                                             Register scratch1,
   5696                                             Register scratch2,
   5697                                             Register scratch3,
   5698                                             Register scratch4,
   5699                                             Label* slow) {
   5700   // First check if the argument is already a string.
   5701   Label not_string, done;
   5702   __ JumpIfSmi(arg, &not_string);
   5703   __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
   5704   __ b(lt, &done);
   5705 
   5706   // Check the number to string cache.
   5707   Label not_cached;
   5708   __ bind(&not_string);
   5709   // Puts the cached result into scratch1.
   5710   NumberToStringStub::GenerateLookupNumberStringCache(masm,
   5711                                                       arg,
   5712                                                       scratch1,
   5713                                                       scratch2,
   5714                                                       scratch3,
   5715                                                       scratch4,
   5716                                                       false,
   5717                                                       &not_cached);
   5718   __ mov(arg, scratch1);
   5719   __ str(arg, MemOperand(sp, stack_offset));
   5720   __ jmp(&done);
   5721 
   5722   // Check if the argument is a safe string wrapper.
   5723   __ bind(&not_cached);
   5724   __ JumpIfSmi(arg, slow);
   5725   __ CompareObjectType(
   5726       arg, scratch1, scratch2, JS_VALUE_TYPE);  // map -> scratch1.
   5727   __ b(ne, slow);
   5728   __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
   5729   __ and_(scratch2,
   5730           scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
   5731   __ cmp(scratch2,
   5732          Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
   5733   __ b(ne, slow);
   5734   __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
   5735   __ str(arg, MemOperand(sp, stack_offset));
   5736 
   5737   __ bind(&done);
   5738 }
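
        // Sketch of the conversion above: strings pass through unchanged;
        // numbers are converted only via the number-to-string cache (nothing
        // is allocated here); JSValue wrappers are unwrapped only when the
        // map's kStringWrapperSafeForDefaultValueOf bit guarantees valueOf
        // has not been overridden; everything else falls through to the slow
        // path.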
   5739 
   5740 
   5741 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   5742   ASSERT(state_ == CompareIC::SMIS);
   5743   Label miss;
   5744   __ orr(r2, r1, r0);
   5745   __ tst(r2, Operand(kSmiTagMask));
   5746   __ b(ne, &miss);
   5747 
   5748   if (GetCondition() == eq) {
   5749     // For equality we do not care about the sign of the result.
   5750     __ sub(r0, r0, r1, SetCC);
   5751   } else {
   5752     // Untag before subtracting to avoid handling overflow.
   5753     __ SmiUntag(r1);
   5754     __ sub(r0, r1, SmiUntagOperand(r0));
   5755   }
   5756   __ Ret();
   5757 
   5758   __ bind(&miss);
   5759   GenerateMiss(masm);
   5760 }
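
        // Why the subtraction above is safe: with kSmiTag == 0 and a one-bit
        // tag, Smi(x) == x << 1, so Smi(a) - Smi(b) == (a - b) << 1, which is
        // zero exactly when a == b -- all the equality case needs. For the
        // ordered comparisons the operands are untagged first, so subtracting
        // two 31-bit values in a 32-bit register cannot overflow.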
   5761 
   5762 
   5763 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
   5764   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
   5765 
   5766   Label generic_stub;
   5767   Label unordered;
   5768   Label miss;
   5769   __ and_(r2, r1, Operand(r0));
   5770   __ tst(r2, Operand(kSmiTagMask));
   5771   __ b(eq, &generic_stub);
   5772 
   5773   __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
   5774   __ b(ne, &miss);
   5775   __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
   5776   __ b(ne, &miss);
   5777 
   5778   // Inline the double comparison and fall back to the general compare
   5779   // stub if NaN is involved or VFP3 is unsupported.
   5780   if (CpuFeatures::IsSupported(VFP3)) {
   5781     CpuFeatures::Scope scope(VFP3);
   5782 
   5783     // Load the left and right operands.
   5784     __ sub(r2, r1, Operand(kHeapObjectTag));
   5785     __ vldr(d0, r2, HeapNumber::kValueOffset);
   5786     __ sub(r2, r0, Operand(kHeapObjectTag));
   5787     __ vldr(d1, r2, HeapNumber::kValueOffset);
   5788 
   5789     // Compare operands
   5790     __ VFPCompareAndSetFlags(d0, d1);
   5791 
   5792     // Don't base result on status bits when a NaN is involved.
   5793     __ b(vs, &unordered);
   5794 
   5795     // Return a result of -1, 0, or 1, based on status bits.
   5796     __ mov(r0, Operand(EQUAL), LeaveCC, eq);
   5797     __ mov(r0, Operand(LESS), LeaveCC, lt);
   5798     __ mov(r0, Operand(GREATER), LeaveCC, gt);
   5799     __ Ret();
   5800 
   5801     __ bind(&unordered);
   5802   }
   5803 
   5804   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
   5805   __ bind(&generic_stub);
   5806   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   5807 
   5808   __ bind(&miss);
   5809   GenerateMiss(masm);
   5810 }
   5811 
   5812 
   5813 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   5814   ASSERT(state_ == CompareIC::OBJECTS);
   5815   Label miss;
   5816   __ and_(r2, r1, Operand(r0));
   5817   __ tst(r2, Operand(kSmiTagMask));
   5818   __ b(eq, &miss);
   5819 
   5820   __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
   5821   __ b(ne, &miss);
   5822   __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
   5823   __ b(ne, &miss);
   5824 
   5825   ASSERT(GetCondition() == eq);
   5826   __ sub(r0, r0, Operand(r1));
   5827   __ Ret();
   5828 
   5829   __ bind(&miss);
   5830   GenerateMiss(masm);
   5831 }
   5832 
   5833 
   5834 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
   5835   __ Push(r1, r0);
   5836   __ push(lr);
   5837 
   5838   // Call the runtime system in a fresh internal frame.
   5839   ExternalReference miss =
   5840       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
   5841   __ EnterInternalFrame();
   5842   __ Push(r1, r0);
   5843   __ mov(ip, Operand(Smi::FromInt(op_)));
   5844   __ push(ip);
   5845   __ CallExternalReference(miss, 3);
   5846   __ LeaveInternalFrame();
   5847   // Compute the entry point of the rewritten stub.
   5848   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
   5849   // Restore registers.
   5850   __ pop(lr);
   5851   __ pop(r0);
   5852   __ pop(r1);
   5853   __ Jump(r2);
   5854 }
   5855 
   5856 
   5857 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   5858   __ ldr(pc, MemOperand(sp, 0));
   5859 }
   5860 
   5861 
   5862 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   5863                                     ExternalReference function) {
   5864   __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
   5865                      RelocInfo::CODE_TARGET));
   5866   __ mov(r2, Operand(function));
   5867   // Store the return address (accessible to GC through the exit frame pc).
   5868   __ str(pc, MemOperand(sp, 0));
   5869   __ Jump(r2);  // Call the api function.
   5870 }
   5871 
   5872 
   5873 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   5874                                     Register target) {
   5875   __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
   5876                      RelocInfo::CODE_TARGET));
   5877   // Store the return address (accessible to GC through the exit frame pc).
   5878   __ str(pc, MemOperand(sp, 0));
   5879   __ Jump(target);  // Call the C++ function.
   5880 }
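
        // Note on the calling sequence above: in ARM state, reading pc yields
        // the address of the current instruction plus 8, so the value stored
        // at sp points just past the following jump -- i.e. at the
        // instruction the callee returns to. lr is pointed at this stub
        // itself, whose single instruction (ldr pc, [sp, #0]) reloads that
        // saved address, so the GC can always find the return address in the
        // exit frame while the C++ call is in progress.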
   5881 
   5882 
   5883 #undef __
   5884 
   5885 } }  // namespace v8::internal
   5886 
   5887 #endif  // V8_TARGET_ARCH_ARM
   5888