      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #if V8_TARGET_ARCH_ARM
      6 
      7 #include "src/code-stubs.h"
      8 #include "src/api-arguments.h"
      9 #include "src/base/bits.h"
     10 #include "src/bootstrapper.h"
     11 #include "src/codegen.h"
     12 #include "src/ic/handler-compiler.h"
     13 #include "src/ic/ic.h"
     14 #include "src/ic/stub-cache.h"
     15 #include "src/isolate.h"
     16 #include "src/regexp/jsregexp.h"
     17 #include "src/regexp/regexp-macro-assembler.h"
     18 #include "src/runtime/runtime.h"
     19 
     20 #include "src/arm/code-stubs-arm.h"
     21 
     22 namespace v8 {
     23 namespace internal {
     24 
     25 #define __ ACCESS_MASM(masm)
     26 
     27 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
     28   __ lsl(r5, r0, Operand(kPointerSizeLog2));
     29   __ str(r1, MemOperand(sp, r5));
     30   __ Push(r1);
     31   __ Push(r2);
     32   __ add(r0, r0, Operand(3));
     33   __ TailCallRuntime(Runtime::kNewArray);
     34 }
     35 
     36 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
     37   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
     38   descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
     39 }
     40 
     41 void FastFunctionBindStub::InitializeDescriptor(
     42     CodeStubDescriptor* descriptor) {
     43   Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
     44   descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
     45 }
     46 
     47 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
     48                                           Condition cond);
     49 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
     50                                     Register lhs,
     51                                     Register rhs,
     52                                     Label* lhs_not_nan,
     53                                     Label* slow,
     54                                     bool strict);
     55 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
     56                                            Register lhs,
     57                                            Register rhs);
     58 
     59 
     60 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
     61                                                ExternalReference miss) {
     62   // Update the static counter each time a new code stub is generated.
     63   isolate()->counters()->code_stubs()->Increment();
     64 
     65   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
     66   int param_count = descriptor.GetRegisterParameterCount();
     67   {
     68     // Call the runtime system in a fresh internal frame.
     69     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     70     DCHECK(param_count == 0 ||
     71            r0.is(descriptor.GetRegisterParameter(param_count - 1)));
     72     // Push arguments
     73     for (int i = 0; i < param_count; ++i) {
     74       __ push(descriptor.GetRegisterParameter(i));
     75     }
     76     __ CallExternalReference(miss, param_count);
     77   }
     78 
     79   __ Ret();
     80 }
     81 
     82 
     83 void DoubleToIStub::Generate(MacroAssembler* masm) {
     84   Label out_of_range, only_low, negate, done;
     85   Register input_reg = source();
     86   Register result_reg = destination();
     87   DCHECK(is_truncating());
     88 
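           // This stub keeps only the low 32 bits of the double's integer part
           // (ToInt32 wrap-around rather than saturation), so inputs whose
           // exponent is too large for the VFP conversion are handled bit by bit.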
     89   int double_offset = offset();
     90   // Account for saved regs if input is sp.
     91   if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
     92 
     93   Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
     94   Register scratch_low =
     95       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
     96   Register scratch_high =
     97       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
     98   LowDwVfpRegister double_scratch = kScratchDoubleReg;
     99 
    100   __ Push(scratch_high, scratch_low, scratch);
    101 
    102   if (!skip_fastpath()) {
    103     // Load double input.
    104     __ vldr(double_scratch, MemOperand(input_reg, double_offset));
    105     __ vmov(scratch_low, scratch_high, double_scratch);
    106 
    107     // Do fast-path convert from double to int.
    108     __ vcvt_s32_f64(double_scratch.low(), double_scratch);
    109     __ vmov(result_reg, double_scratch.low());
    110 
    111     // If result is not saturated (0x7fffffff or 0x80000000), we are done.
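             // Subtracting 1 maps the two saturated values 0x7fffffff and
             // 0x80000000 to 0x7ffffffe and 0x7fffffff; every other result is
             // signed-less-than 0x7ffffffe, so one signed compare detects both.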
    112     __ sub(scratch, result_reg, Operand(1));
    113     __ cmp(scratch, Operand(0x7ffffffe));
    114     __ b(lt, &done);
    115   } else {
     116     // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so the
     117     // exponent exceeds 31; the saturating vcvt_s32_f64 fast path is skipped.
    118     if (double_offset == 0) {
    119       __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
    120     } else {
    121       __ ldr(scratch_low, MemOperand(input_reg, double_offset));
    122       __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
    123     }
    124   }
    125 
    126   __ Ubfx(scratch, scratch_high,
    127          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
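           // scratch now holds the biased 11-bit exponent of the input.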
    128   // Load scratch with exponent - 1. This is faster than loading
    129   // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
    130   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
    131   __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
     132   // If the exponent is greater than or equal to 84, the 32 least significant
     133   // bits of the integer are zero (84 = 52 mantissa bits + 32, so even the
     134   // lowest mantissa bit has weight 2^32 or more) and the result is 0.
     135   // Compare the exponent with 84 (i.e. compare exponent - 1 with 83).
    136   __ cmp(scratch, Operand(83));
    137   __ b(ge, &out_of_range);
    138 
    139   // If we reach this code, 31 <= exponent <= 83.
    140   // So, we don't have to handle cases where 0 <= exponent <= 20 for
    141   // which we would need to shift right the high part of the mantissa.
    142   // Scratch contains exponent - 1.
    143   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
    144   __ rsb(scratch, scratch, Operand(51), SetCC);
    145   __ b(ls, &only_low);
    146   // 21 <= exponent <= 51, shift scratch_low and scratch_high
    147   // to generate the result.
    148   __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
    149   // Scratch contains: 52 - exponent.
     150   // We need: exponent - 20.
    151   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
    152   __ rsb(scratch, scratch, Operand(32));
    153   __ Ubfx(result_reg, scratch_high,
    154           0, HeapNumber::kMantissaBitsInTopWord);
     155   // Set the implicit 1 just above the mantissa bits taken from scratch_high.
    156   __ orr(result_reg, result_reg,
    157          Operand(1 << HeapNumber::kMantissaBitsInTopWord));
    158   __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
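           // result_reg now holds the low 32 bits of the truncated magnitude: the
           // top 21 bits of the significand (implicit 1 plus the mantissa's top
           // word) shifted left by exponent - 20, combined with the mantissa's low
           // word shifted right by 52 - exponent.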
    159   __ b(&negate);
    160 
    161   __ bind(&out_of_range);
    162   __ mov(result_reg, Operand::Zero());
    163   __ b(&done);
    164 
    165   __ bind(&only_low);
    166   // 52 <= exponent <= 83, shift only scratch_low.
    167   // On entry, scratch contains: 52 - exponent.
    168   __ rsb(scratch, scratch, Operand::Zero());
    169   __ mov(result_reg, Operand(scratch_low, LSL, scratch));
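           // result_reg now holds the mantissa's low word shifted left by
           // exponent - 52; the high word only contributes bits at position 32 and
           // above, which the 32-bit truncation discards anyway.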
    170 
    171   __ bind(&negate);
     172   // If the input was positive, scratch_high ASR 31 equals 0 and
     173   // scratch_high LSR 31 equals 0, so
     174   // new result = (result eor 0) + 0 = result.
     175   // If the input was negative, we have to negate the result:
     176   // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1, so
     177   // new result = (result eor 0xffffffff) + 1 = 0 - result.
    178   __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
    179   __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
    180 
    181   __ bind(&done);
    182 
    183   __ Pop(scratch_high, scratch_low, scratch);
    184   __ Ret();
    185 }
    186 
    187 
    188 // Handle the case where the lhs and rhs are the same object.
    189 // Equality is almost reflexive (everything but NaN), so this is a test
    190 // for "identity and not NaN".
    191 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
    192                                           Condition cond) {
    193   Label not_identical;
    194   Label heap_number, return_equal;
    195   __ cmp(r0, r1);
    196   __ b(ne, &not_identical);
    197 
    198   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
    199   // so we do the second best thing - test it ourselves.
     200   // The operands are identical and we already know they are not both Smis,
     201   // so neither of them is a Smi.  If it's not a heap number, then return equal.
    202   if (cond == lt || cond == gt) {
    203     // Call runtime on identical JSObjects.
    204     __ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
    205     __ b(ge, slow);
    206     // Call runtime on identical symbols since we need to throw a TypeError.
    207     __ cmp(r4, Operand(SYMBOL_TYPE));
    208     __ b(eq, slow);
    209     // Call runtime on identical SIMD values since we must throw a TypeError.
    210     __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
    211     __ b(eq, slow);
    212   } else {
    213     __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    214     __ b(eq, &heap_number);
    215     // Comparing JS objects with <=, >= is complicated.
    216     if (cond != eq) {
    217       __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
    218       __ b(ge, slow);
    219       // Call runtime on identical symbols since we need to throw a TypeError.
    220       __ cmp(r4, Operand(SYMBOL_TYPE));
    221       __ b(eq, slow);
    222       // Call runtime on identical SIMD values since we must throw a TypeError.
    223       __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
    224       __ b(eq, slow);
    225       // Normally here we fall through to return_equal, but undefined is
    226       // special: (undefined == undefined) == true, but
    227       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
    228       if (cond == le || cond == ge) {
    229         __ cmp(r4, Operand(ODDBALL_TYPE));
    230         __ b(ne, &return_equal);
    231         __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
    232         __ cmp(r0, r2);
    233         __ b(ne, &return_equal);
    234         if (cond == le) {
    235           // undefined <= undefined should fail.
    236           __ mov(r0, Operand(GREATER));
    237         } else  {
    238           // undefined >= undefined should fail.
    239           __ mov(r0, Operand(LESS));
    240         }
    241         __ Ret();
    242       }
    243     }
    244   }
    245 
    246   __ bind(&return_equal);
    247   if (cond == lt) {
    248     __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
    249   } else if (cond == gt) {
    250     __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
    251   } else {
    252     __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
    253   }
    254   __ Ret();
    255 
    256   // For less and greater we don't have to check for NaN since the result of
    257   // x < x is false regardless.  For the others here is some code to check
    258   // for NaN.
    259   if (cond != lt && cond != gt) {
    260     __ bind(&heap_number);
    261     // It is a heap number, so return non-equal if it's NaN and equal if it's
    262     // not NaN.
    263 
    264     // The representation of NaN values has all exponent bits (52..62) set,
    265     // and not all mantissa bits (0..51) clear.
    266     // Read top bits of double representation (second word of value).
    267     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    268     // Test that exponent bits are all set.
    269     __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    270     // NaNs have all-one exponents so they sign extend to -1.
    271     __ cmp(r3, Operand(-1));
    272     __ b(ne, &return_equal);
    273 
    274     // Shift out flag and all exponent bits, retaining only mantissa.
    275     __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
    276     // Or with all low-bits of mantissa.
    277     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    278     __ orr(r0, r3, Operand(r2), SetCC);
    279     // For equal we already have the right value in r0:  Return zero (equal)
    280     // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    281     // not (it's a NaN).  For <= and >= we need to load r0 with the failing
    282     // value if it's a NaN.
    283     if (cond != eq) {
    284       // All-zero means Infinity means equal.
    285       __ Ret(eq);
    286       if (cond == le) {
    287         __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
    288       } else {
    289         __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
    290       }
    291     }
    292     __ Ret();
    293   }
    294   // No fall through here.
    295 
    296   __ bind(&not_identical);
    297 }
    298 
    299 
    300 // See comment at call site.
    301 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
    302                                     Register lhs,
    303                                     Register rhs,
    304                                     Label* lhs_not_nan,
    305                                     Label* slow,
    306                                     bool strict) {
    307   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
    308          (lhs.is(r1) && rhs.is(r0)));
    309 
    310   Label rhs_is_smi;
    311   __ JumpIfSmi(rhs, &rhs_is_smi);
    312 
    313   // Lhs is a Smi.  Check whether the rhs is a heap number.
    314   __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
    315   if (strict) {
    316     // If rhs is not a number and lhs is a Smi then strict equality cannot
     317     // succeed.  Return non-equal.
     318     // If rhs is r0 then there is already a non-zero value in it.
    319     if (!rhs.is(r0)) {
    320       __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    321     }
    322     __ Ret(ne);
    323   } else {
    324     // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    325     // the runtime.
    326     __ b(ne, slow);
    327   }
    328 
    329   // Lhs is a smi, rhs is a number.
    330   // Convert lhs to a double in d7.
    331   __ SmiToDouble(d7, lhs);
    332   // Load the double from rhs, tagged HeapNumber r0, to d6.
    333   __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
    334 
    335   // We now have both loaded as doubles but we can skip the lhs nan check
    336   // since it's a smi.
    337   __ jmp(lhs_not_nan);
    338 
    339   __ bind(&rhs_is_smi);
    340   // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
    341   __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
    342   if (strict) {
    343     // If lhs is not a number and rhs is a smi then strict equality cannot
    344     // succeed.  Return non-equal.
     345     // If lhs is r0 then there is already a non-zero value in it.
    346     if (!lhs.is(r0)) {
    347       __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    348     }
    349     __ Ret(ne);
    350   } else {
    351     // Smi compared non-strictly with a non-smi non-heap-number.  Call
    352     // the runtime.
    353     __ b(ne, slow);
    354   }
    355 
    356   // Rhs is a smi, lhs is a heap number.
    357   // Load the double from lhs, tagged HeapNumber r1, to d7.
    358   __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
     359   // Convert rhs to a double in d6.
    360   __ SmiToDouble(d6, rhs);
    361   // Fall through to both_loaded_as_doubles.
    362 }
    363 
    364 
    365 // See comment at call site.
    366 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
    367                                            Register lhs,
    368                                            Register rhs) {
    369     DCHECK((lhs.is(r0) && rhs.is(r1)) ||
    370            (lhs.is(r1) && rhs.is(r0)));
    371 
    372     // If either operand is a JS object or an oddball value, then they are
    373     // not equal since their pointers are different.
    374     // There is no test for undetectability in strict equality.
    375     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    376     Label first_non_object;
    377     // Get the type of the first operand into r2 and compare it with
    378     // FIRST_JS_RECEIVER_TYPE.
    379     __ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
    380     __ b(lt, &first_non_object);
    381 
    382     // Return non-zero (r0 is not zero)
    383     Label return_not_equal;
    384     __ bind(&return_not_equal);
    385     __ Ret();
    386 
    387     __ bind(&first_non_object);
    388     // Check for oddballs: true, false, null, undefined.
    389     __ cmp(r2, Operand(ODDBALL_TYPE));
    390     __ b(eq, &return_not_equal);
    391 
    392     __ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
    393     __ b(ge, &return_not_equal);
    394 
    395     // Check for oddballs: true, false, null, undefined.
    396     __ cmp(r3, Operand(ODDBALL_TYPE));
    397     __ b(eq, &return_not_equal);
    398 
    399     // Now that we have the types we might as well check for
    400     // internalized-internalized.
    401     STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    402     __ orr(r2, r2, Operand(r3));
    403     __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    404     __ b(eq, &return_not_equal);
    405 }
    406 
    407 
    408 // See comment at call site.
    409 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
    410                                        Register lhs,
    411                                        Register rhs,
    412                                        Label* both_loaded_as_doubles,
    413                                        Label* not_heap_numbers,
    414                                        Label* slow) {
    415   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
    416          (lhs.is(r1) && rhs.is(r0)));
    417 
    418   __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
    419   __ b(ne, not_heap_numbers);
    420   __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
    421   __ cmp(r2, r3);
    422   __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
    423 
    424   // Both are heap numbers.  Load them up then jump to the code we have
    425   // for that.
    426   __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
    427   __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
    428   __ jmp(both_loaded_as_doubles);
    429 }
    430 
    431 
    432 // Fast negative check for internalized-to-internalized equality or receiver
    433 // equality. Also handles the undetectable receiver to null/undefined
    434 // comparison.
    435 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
    436                                                      Register lhs, Register rhs,
    437                                                      Label* possible_strings,
    438                                                      Label* runtime_call) {
    439   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
    440          (lhs.is(r1) && rhs.is(r0)));
    441 
    442   // r2 is object type of rhs.
    443   Label object_test, return_equal, return_unequal, undetectable;
    444   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    445   __ tst(r2, Operand(kIsNotStringMask));
    446   __ b(ne, &object_test);
    447   __ tst(r2, Operand(kIsNotInternalizedMask));
    448   __ b(ne, possible_strings);
    449   __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
    450   __ b(ge, runtime_call);
    451   __ tst(r3, Operand(kIsNotInternalizedMask));
    452   __ b(ne, possible_strings);
    453 
    454   // Both are internalized. We already checked they weren't the same pointer so
    455   // they are not equal. Return non-equal by returning the non-zero object
    456   // pointer in r0.
    457   __ Ret();
    458 
    459   __ bind(&object_test);
    460   __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
    461   __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
    462   __ ldrb(r4, FieldMemOperand(r2, Map::kBitFieldOffset));
    463   __ ldrb(r5, FieldMemOperand(r3, Map::kBitFieldOffset));
    464   __ tst(r4, Operand(1 << Map::kIsUndetectable));
    465   __ b(ne, &undetectable);
    466   __ tst(r5, Operand(1 << Map::kIsUndetectable));
    467   __ b(ne, &return_unequal);
    468 
    469   __ CompareInstanceType(r2, r2, FIRST_JS_RECEIVER_TYPE);
    470   __ b(lt, runtime_call);
    471   __ CompareInstanceType(r3, r3, FIRST_JS_RECEIVER_TYPE);
    472   __ b(lt, runtime_call);
    473 
    474   __ bind(&return_unequal);
    475   // Return non-equal by returning the non-zero object pointer in r0.
    476   __ Ret();
    477 
    478   __ bind(&undetectable);
    479   __ tst(r5, Operand(1 << Map::kIsUndetectable));
    480   __ b(eq, &return_unequal);
    481 
    482   // If both sides are JSReceivers, then the result is false according to
    483   // the HTML specification, which says that only comparisons with null or
    484   // undefined are affected by special casing for document.all.
    485   __ CompareInstanceType(r2, r2, ODDBALL_TYPE);
    486   __ b(eq, &return_equal);
    487   __ CompareInstanceType(r3, r3, ODDBALL_TYPE);
    488   __ b(ne, &return_unequal);
    489 
    490   __ bind(&return_equal);
    491   __ mov(r0, Operand(EQUAL));
    492   __ Ret();
    493 }
    494 
    495 
    496 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
    497                                          Register scratch,
    498                                          CompareICState::State expected,
    499                                          Label* fail) {
    500   Label ok;
    501   if (expected == CompareICState::SMI) {
    502     __ JumpIfNotSmi(input, fail);
    503   } else if (expected == CompareICState::NUMBER) {
    504     __ JumpIfSmi(input, &ok);
    505     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
    506                 DONT_DO_SMI_CHECK);
    507   }
    508   // We could be strict about internalized/non-internalized here, but as long as
    509   // hydrogen doesn't care, the stub doesn't have to care either.
    510   __ bind(&ok);
    511 }
    512 
    513 
     514 // On entry r1 and r0 are the values to be compared.
    515 // On exit r0 is 0, positive or negative to indicate the result of
    516 // the comparison.
    517 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
    518   Register lhs = r1;
    519   Register rhs = r0;
    520   Condition cc = GetCondition();
    521 
    522   Label miss;
    523   CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
    524   CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
    525 
    526   Label slow;  // Call builtin.
    527   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
    528 
    529   Label not_two_smis, smi_done;
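           // Fast case: both operands are smis. Untag them with an arithmetic shift
           // and subtract; the sign of the difference (negative, zero or positive)
           // is exactly the answer expected in r0.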
    530   __ orr(r2, r1, r0);
    531   __ JumpIfNotSmi(r2, &not_two_smis);
    532   __ mov(r1, Operand(r1, ASR, 1));
    533   __ sub(r0, r1, Operand(r0, ASR, 1));
    534   __ Ret();
    535   __ bind(&not_two_smis);
    536 
    537   // NOTICE! This code is only reached after a smi-fast-case check, so
    538   // it is certain that at least one operand isn't a smi.
    539 
    540   // Handle the case where the objects are identical.  Either returns the answer
    541   // or goes to slow.  Only falls through if the objects were not identical.
    542   EmitIdenticalObjectComparison(masm, &slow, cc);
    543 
    544   // If either is a Smi (we know that not both are), then they can only
    545   // be strictly equal if the other is a HeapNumber.
    546   STATIC_ASSERT(kSmiTag == 0);
    547   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
    548   __ and_(r2, lhs, Operand(rhs));
    549   __ JumpIfNotSmi(r2, &not_smis);
    550   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
    551   // 1) Return the answer.
    552   // 2) Go to slow.
    553   // 3) Fall through to both_loaded_as_doubles.
    554   // 4) Jump to lhs_not_nan.
    555   // In cases 3 and 4 we have found out we were dealing with a number-number
    556   // comparison.  If VFP3 is supported the double values of the numbers have
    557   // been loaded into d7 and d6.  Otherwise, the double values have been loaded
    558   // into r0, r1, r2, and r3.
    559   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
    560 
    561   __ bind(&both_loaded_as_doubles);
    562   // The arguments have been converted to doubles and stored in d6 and d7, if
    563   // VFP3 is supported, or in r0, r1, r2, and r3.
    564   __ bind(&lhs_not_nan);
    565   Label no_nan;
    566   // ARMv7 VFP3 instructions to implement double precision comparison.
    567   __ VFPCompareAndSetFlags(d7, d6);
    568   Label nan;
    569   __ b(vs, &nan);
    570   __ mov(r0, Operand(EQUAL), LeaveCC, eq);
    571   __ mov(r0, Operand(LESS), LeaveCC, lt);
    572   __ mov(r0, Operand(GREATER), LeaveCC, gt);
    573   __ Ret();
    574 
    575   __ bind(&nan);
    576   // If one of the sides was a NaN then the v flag is set.  Load r0 with
    577   // whatever it takes to make the comparison fail, since comparisons with NaN
    578   // always fail.
    579   if (cc == lt || cc == le) {
    580     __ mov(r0, Operand(GREATER));
    581   } else {
    582     __ mov(r0, Operand(LESS));
    583   }
    584   __ Ret();
    585 
    586   __ bind(&not_smis);
    587   // At this point we know we are dealing with two different objects,
    588   // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
    589   if (strict()) {
    590     // This returns non-equal for some object types, or falls through if it
    591     // was not lucky.
    592     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
    593   }
    594 
    595   Label check_for_internalized_strings;
    596   Label flat_string_check;
    597   // Check for heap-number-heap-number comparison.  Can jump to slow case,
    598   // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
    599   // that case.  If the inputs are not doubles then jumps to
    600   // check_for_internalized_strings.
    601   // In this case r2 will contain the type of rhs_.  Never falls through.
    602   EmitCheckForTwoHeapNumbers(masm,
    603                              lhs,
    604                              rhs,
    605                              &both_loaded_as_doubles,
    606                              &check_for_internalized_strings,
    607                              &flat_string_check);
    608 
    609   __ bind(&check_for_internalized_strings);
    610   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
    611   // internalized strings.
    612   if (cc == eq && !strict()) {
    613     // Returns an answer for two internalized strings or two detectable objects.
    614     // Otherwise jumps to string case or not both strings case.
    615     // Assumes that r2 is the type of rhs_ on entry.
    616     EmitCheckForInternalizedStringsOrObjects(
    617         masm, lhs, rhs, &flat_string_check, &slow);
    618   }
    619 
    620   // Check for both being sequential one-byte strings,
    621   // and inline if that is the case.
    622   __ bind(&flat_string_check);
    623 
    624   __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
    625 
    626   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
    627                       r3);
    628   if (cc == eq) {
    629     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
    630   } else {
    631     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
    632                                                     r5);
    633   }
    634   // Never falls through to here.
    635 
    636   __ bind(&slow);
    637 
    638   if (cc == eq) {
    639     {
    640       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    641       __ Push(lhs, rhs);
    642       __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    643     }
    644     // Turn true into 0 and false into some non-zero value.
    645     STATIC_ASSERT(EQUAL == 0);
    646     __ LoadRoot(r1, Heap::kTrueValueRootIndex);
    647     __ sub(r0, r0, r1);
    648     __ Ret();
    649   } else {
    650     __ Push(lhs, rhs);
    651     int ncr;  // NaN compare result
    652     if (cc == lt || cc == le) {
    653       ncr = GREATER;
    654     } else {
    655       DCHECK(cc == gt || cc == ge);  // remaining cases
    656       ncr = LESS;
    657     }
    658     __ mov(r0, Operand(Smi::FromInt(ncr)));
    659     __ push(r0);
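             // Pass the chosen NaN result as a third argument so that a comparison
             // involving NaN fails in the required direction.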
    660 
    661     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    662     // tagged as a small integer.
    663     __ TailCallRuntime(Runtime::kCompare);
    664   }
    665 
    666   __ bind(&miss);
    667   GenerateMiss(masm);
    668 }
    669 
    670 
    671 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
    672   // We don't allow a GC during a store buffer overflow so there is no need to
    673   // store the registers in any particular way, but we do have to store and
    674   // restore them.
    675   __ stm(db_w, sp, kCallerSaved | lr.bit());
    676 
    677   const Register scratch = r1;
    678 
    679   if (save_doubles()) {
    680     __ SaveFPRegs(sp, scratch);
    681   }
    682   const int argument_count = 1;
    683   const int fp_argument_count = 0;
    684 
    685   AllowExternalCallThatCantCauseGC scope(masm);
    686   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
    687   __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
    688   __ CallCFunction(
    689       ExternalReference::store_buffer_overflow_function(isolate()),
    690       argument_count);
    691   if (save_doubles()) {
    692     __ RestoreFPRegs(sp, scratch);
    693   }
    694   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
    695 }
    696 
    697 
    698 void MathPowStub::Generate(MacroAssembler* masm) {
    699   const Register base = r1;
    700   const Register exponent = MathPowTaggedDescriptor::exponent();
    701   DCHECK(exponent.is(r2));
    702   const Register heapnumbermap = r5;
    703   const Register heapnumber = r0;
    704   const DwVfpRegister double_base = d0;
    705   const DwVfpRegister double_exponent = d1;
    706   const DwVfpRegister double_result = d2;
    707   const DwVfpRegister double_scratch = d3;
    708   const SwVfpRegister single_scratch = s6;
    709   const Register scratch = r9;
    710   const Register scratch2 = r4;
    711 
    712   Label call_runtime, done, int_exponent;
    713   if (exponent_type() == ON_STACK) {
    714     Label base_is_smi, unpack_exponent;
    715     // The exponent and base are supplied as arguments on the stack.
    716     // This can only happen if the stub is called from non-optimized code.
    717     // Load input parameters from stack to double registers.
    718     __ ldr(base, MemOperand(sp, 1 * kPointerSize));
    719     __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
    720 
    721     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
    722 
    723     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    724     __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    725     __ cmp(scratch, heapnumbermap);
    726     __ b(ne, &call_runtime);
    727 
    728     __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    729     __ jmp(&unpack_exponent);
    730 
    731     __ bind(&base_is_smi);
    732     __ vmov(single_scratch, scratch);
    733     __ vcvt_f64_s32(double_base, single_scratch);
    734     __ bind(&unpack_exponent);
    735 
    736     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    737 
    738     __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    739     __ cmp(scratch, heapnumbermap);
    740     __ b(ne, &call_runtime);
    741     __ vldr(double_exponent,
    742             FieldMemOperand(exponent, HeapNumber::kValueOffset));
    743   } else if (exponent_type() == TAGGED) {
    744     // Base is already in double_base.
    745     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    746 
    747     __ vldr(double_exponent,
    748             FieldMemOperand(exponent, HeapNumber::kValueOffset));
    749   }
    750 
    751   if (exponent_type() != INTEGER) {
    752     Label int_exponent_convert;
    753     // Detect integer exponents stored as double.
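             // An exponent that survives a round trip through an unsigned 32-bit
             // integer is a non-negative integer and can take the faster integer
             // path below.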
    754     __ vcvt_u32_f64(single_scratch, double_exponent);
    755     // We do not check for NaN or Infinity here because comparing numbers on
    756     // ARM correctly distinguishes NaNs.  We end up calling the built-in.
    757     __ vcvt_f64_u32(double_scratch, single_scratch);
    758     __ VFPCompareAndSetFlags(double_scratch, double_exponent);
    759     __ b(eq, &int_exponent_convert);
    760 
    761     if (exponent_type() == ON_STACK) {
    762       // Detect square root case.  Crankshaft detects constant +/-0.5 at
    763       // compile time and uses DoMathPowHalf instead.  We then skip this check
    764       // for non-constant cases of +/-0.5 as these hardly occur.
    765       Label not_plus_half;
    766 
    767       // Test for 0.5.
    768       __ vmov(double_scratch, 0.5, scratch);
    769       __ VFPCompareAndSetFlags(double_exponent, double_scratch);
    770       __ b(ne, &not_plus_half);
    771 
    772       // Calculates square root of base.  Check for the special case of
    773       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
    774       __ vmov(double_scratch, -V8_INFINITY, scratch);
    775       __ VFPCompareAndSetFlags(double_base, double_scratch);
    776       __ vneg(double_result, double_scratch, eq);
    777       __ b(eq, &done);
    778 
    779       // Add +0 to convert -0 to +0.
    780       __ vadd(double_scratch, double_base, kDoubleRegZero);
    781       __ vsqrt(double_result, double_scratch);
    782       __ jmp(&done);
    783 
    784       __ bind(&not_plus_half);
    785       __ vmov(double_scratch, -0.5, scratch);
    786       __ VFPCompareAndSetFlags(double_exponent, double_scratch);
    787       __ b(ne, &call_runtime);
    788 
    789       // Calculates square root of base.  Check for the special case of
    790       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
    791       __ vmov(double_scratch, -V8_INFINITY, scratch);
    792       __ VFPCompareAndSetFlags(double_base, double_scratch);
    793       __ vmov(double_result, kDoubleRegZero, eq);
    794       __ b(eq, &done);
    795 
    796       // Add +0 to convert -0 to +0.
    797       __ vadd(double_scratch, double_base, kDoubleRegZero);
    798       __ vmov(double_result, 1.0, scratch);
    799       __ vsqrt(double_scratch, double_scratch);
    800       __ vdiv(double_result, double_result, double_scratch);
    801       __ jmp(&done);
    802     }
    803 
    804     __ push(lr);
    805     {
    806       AllowExternalCallThatCantCauseGC scope(masm);
    807       __ PrepareCallCFunction(0, 2, scratch);
    808       __ MovToFloatParameters(double_base, double_exponent);
    809       __ CallCFunction(
    810           ExternalReference::power_double_double_function(isolate()),
    811           0, 2);
    812     }
    813     __ pop(lr);
    814     __ MovFromFloatResult(double_result);
    815     __ jmp(&done);
    816 
    817     __ bind(&int_exponent_convert);
    818     __ vcvt_u32_f64(single_scratch, double_exponent);
    819     __ vmov(scratch, single_scratch);
    820   }
    821 
    822   // Calculate power with integer exponent.
    823   __ bind(&int_exponent);
    824 
    825   // Get two copies of exponent in the registers scratch and exponent.
    826   if (exponent_type() == INTEGER) {
    827     __ mov(scratch, exponent);
    828   } else {
    829     // Exponent has previously been stored into scratch as untagged integer.
    830     __ mov(exponent, scratch);
    831   }
    832   __ vmov(double_scratch, double_base);  // Back up base.
    833   __ vmov(double_result, 1.0, scratch2);
    834 
    835   // Get absolute value of exponent.
    836   __ cmp(scratch, Operand::Zero());
    837   __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
    838   __ sub(scratch, scratch2, scratch, LeaveCC, mi);
    839 
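           // Compute base^|exponent| by binary exponentiation: each iteration
           // shifts the lowest exponent bit into the carry flag, conditionally
           // multiplies double_result by the current power, and squares
           // double_scratch for the next bit.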
    840   Label while_true;
    841   __ bind(&while_true);
    842   __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
    843   __ vmul(double_result, double_result, double_scratch, cs);
    844   __ vmul(double_scratch, double_scratch, double_scratch, ne);
    845   __ b(ne, &while_true);
    846 
    847   __ cmp(exponent, Operand::Zero());
    848   __ b(ge, &done);
    849   __ vmov(double_scratch, 1.0, scratch);
    850   __ vdiv(double_result, double_scratch, double_result);
    851   // Test whether result is zero.  Bail out to check for subnormal result.
    852   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
    853   __ VFPCompareAndSetFlags(double_result, 0.0);
    854   __ b(ne, &done);
     855   // double_exponent may not contain the exponent value if the input was a
     856   // smi.  We set it to the exponent value before bailing out.
    857   __ vmov(single_scratch, exponent);
    858   __ vcvt_f64_s32(double_exponent, single_scratch);
    859 
    860   // Returning or bailing out.
    861   if (exponent_type() == ON_STACK) {
    862     // The arguments are still on the stack.
    863     __ bind(&call_runtime);
    864     __ TailCallRuntime(Runtime::kMathPowRT);
    865 
     866     // The stub is called from non-optimized code, which expects the result
     867     // as a heap number in r0.
    868     __ bind(&done);
    869     __ AllocateHeapNumber(
    870         heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    871     __ vstr(double_result,
    872             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    873     DCHECK(heapnumber.is(r0));
    874     __ Ret(2);
    875   } else {
    876     __ push(lr);
    877     {
    878       AllowExternalCallThatCantCauseGC scope(masm);
    879       __ PrepareCallCFunction(0, 2, scratch);
    880       __ MovToFloatParameters(double_base, double_exponent);
    881       __ CallCFunction(
    882           ExternalReference::power_double_double_function(isolate()),
    883           0, 2);
    884     }
    885     __ pop(lr);
    886     __ MovFromFloatResult(double_result);
    887 
    888     __ bind(&done);
    889     __ Ret();
    890   }
    891 }
    892 
    893 
    894 bool CEntryStub::NeedsImmovableCode() {
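           // Generate() stores the stub's own return address on the stack (see the
           // comment there), so the generated code must never move.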
    895   return true;
    896 }
    897 
    898 
    899 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
    900   CEntryStub::GenerateAheadOfTime(isolate);
    901   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
    902   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
    903   CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
    904   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
    905   CreateWeakCellStub::GenerateAheadOfTime(isolate);
    906   BinaryOpICStub::GenerateAheadOfTime(isolate);
    907   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
    908   StoreFastElementStub::GenerateAheadOfTime(isolate);
    909   TypeofStub::GenerateAheadOfTime(isolate);
    910 }
    911 
    912 
    913 void CodeStub::GenerateFPStubs(Isolate* isolate) {
    914   // Generate if not already in cache.
    915   SaveFPRegsMode mode = kSaveFPRegs;
    916   CEntryStub(isolate, 1, mode).GetCode();
    917   StoreBufferOverflowStub(isolate, mode).GetCode();
    918   isolate->set_fp_stubs_generated(true);
    919 }
    920 
    921 
    922 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
    923   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
    924   stub.GetCode();
    925 }
    926 
    927 
    928 void CEntryStub::Generate(MacroAssembler* masm) {
    929   // Called from JavaScript; parameters are on stack as if calling JS function.
    930   // r0: number of arguments including receiver
    931   // r1: pointer to builtin function
    932   // fp: frame pointer  (restored after C call)
    933   // sp: stack pointer  (restored as callee's sp after C call)
    934   // cp: current context  (C callee-saved)
    935   //
    936   // If argv_in_register():
    937   // r2: pointer to the first argument
    938   ProfileEntryHookStub::MaybeCallEntryHook(masm);
    939 
    940   __ mov(r5, Operand(r1));
    941 
    942   if (argv_in_register()) {
    943     // Move argv into the correct register.
    944     __ mov(r1, Operand(r2));
    945   } else {
    946     // Compute the argv pointer in a callee-saved register.
    947     __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
    948     __ sub(r1, r1, Operand(kPointerSize));
    949   }
    950 
    951   // Enter the exit frame that transitions from JavaScript to C++.
    952   FrameScope scope(masm, StackFrame::MANUAL);
    953   __ EnterExitFrame(save_doubles());
    954 
    955   // Store a copy of argc in callee-saved registers for later.
    956   __ mov(r4, Operand(r0));
    957 
    958   // r0, r4: number of arguments including receiver  (C callee-saved)
    959   // r1: pointer to the first argument (C callee-saved)
    960   // r5: pointer to builtin function  (C callee-saved)
    961 
    962   int frame_alignment = MacroAssembler::ActivationFrameAlignment();
    963   int frame_alignment_mask = frame_alignment - 1;
    964 #if V8_HOST_ARCH_ARM
    965   if (FLAG_debug_code) {
    966     if (frame_alignment > kPointerSize) {
    967       Label alignment_as_expected;
    968       DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    969       __ tst(sp, Operand(frame_alignment_mask));
    970       __ b(eq, &alignment_as_expected);
     971       // Don't use Check here, as it will call Runtime_Abort and re-enter here.
    972       __ stop("Unexpected alignment");
    973       __ bind(&alignment_as_expected);
    974     }
    975   }
    976 #endif
    977 
    978   // Call C built-in.
    979   int result_stack_size;
    980   if (result_size() <= 2) {
    981     // r0 = argc, r1 = argv, r2 = isolate
    982     __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
    983     result_stack_size = 0;
    984   } else {
    985     DCHECK_EQ(3, result_size());
    986     // Allocate additional space for the result.
    987     result_stack_size =
    988         ((result_size() * kPointerSize) + frame_alignment_mask) &
    989         ~frame_alignment_mask;
    990     __ sub(sp, sp, Operand(result_stack_size));
    991 
    992     // r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
    993     __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
    994     __ mov(r2, Operand(r1));
    995     __ mov(r1, Operand(r0));
    996     __ mov(r0, Operand(sp));
    997   }
    998 
    999   // To let the GC traverse the return address of the exit frames, we need to
   1000   // know where the return address is. The CEntryStub is unmovable, so
   1001   // we can store the address on the stack to be able to find it again and
   1002   // we never have to restore it, because it will not change.
   1003   // Compute the return address in lr to return to after the jump below. Pc is
   1004   // already at '+ 8' from the current instruction but return is after three
   1005   // instructions so add another 4 to pc to get the return address.
   1006   {
   1007     // Prevent literal pool emission before return address.
   1008     Assembler::BlockConstPoolScope block_const_pool(masm);
   1009     __ add(lr, pc, Operand(4));
   1010     __ str(lr, MemOperand(sp, result_stack_size));
   1011     __ Call(r5);
   1012   }
   1013   if (result_size() > 2) {
   1014     DCHECK_EQ(3, result_size());
   1015     // Read result values stored on stack.
   1016     __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
   1017     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
   1018     __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
   1019   }
   1020   // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
   1021 
   1022   // Check result for exception sentinel.
   1023   Label exception_returned;
   1024   __ CompareRoot(r0, Heap::kExceptionRootIndex);
   1025   __ b(eq, &exception_returned);
   1026 
   1027   // Check that there is no pending exception, otherwise we
   1028   // should have returned the exception sentinel.
   1029   if (FLAG_debug_code) {
   1030     Label okay;
   1031     ExternalReference pending_exception_address(
   1032         Isolate::kPendingExceptionAddress, isolate());
   1033     __ mov(r3, Operand(pending_exception_address));
   1034     __ ldr(r3, MemOperand(r3));
   1035     __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
    1036     // Cannot use Check here, as it would generate a call into the runtime.
   1037     __ b(eq, &okay);
   1038     __ stop("Unexpected pending exception");
   1039     __ bind(&okay);
   1040   }
   1041 
   1042   // Exit C frame and return.
   1043   // r0:r1: result
   1044   // sp: stack pointer
   1045   // fp: frame pointer
   1046   Register argc;
   1047   if (argv_in_register()) {
   1048     // We don't want to pop arguments so set argc to no_reg.
   1049     argc = no_reg;
   1050   } else {
   1051     // Callee-saved register r4 still holds argc.
   1052     argc = r4;
   1053   }
   1054   __ LeaveExitFrame(save_doubles(), argc, true);
   1055   __ mov(pc, lr);
   1056 
   1057   // Handling of exception.
   1058   __ bind(&exception_returned);
   1059 
   1060   ExternalReference pending_handler_context_address(
   1061       Isolate::kPendingHandlerContextAddress, isolate());
   1062   ExternalReference pending_handler_code_address(
   1063       Isolate::kPendingHandlerCodeAddress, isolate());
   1064   ExternalReference pending_handler_offset_address(
   1065       Isolate::kPendingHandlerOffsetAddress, isolate());
   1066   ExternalReference pending_handler_fp_address(
   1067       Isolate::kPendingHandlerFPAddress, isolate());
   1068   ExternalReference pending_handler_sp_address(
   1069       Isolate::kPendingHandlerSPAddress, isolate());
   1070 
   1071   // Ask the runtime for help to determine the handler. This will set r0 to
   1072   // contain the current pending exception, don't clobber it.
   1073   ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
   1074                                  isolate());
   1075   {
   1076     FrameScope scope(masm, StackFrame::MANUAL);
   1077     __ PrepareCallCFunction(3, 0, r0);
   1078     __ mov(r0, Operand(0));
   1079     __ mov(r1, Operand(0));
   1080     __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
   1081     __ CallCFunction(find_handler, 3);
   1082   }
   1083 
   1084   // Retrieve the handler context, SP and FP.
   1085   __ mov(cp, Operand(pending_handler_context_address));
   1086   __ ldr(cp, MemOperand(cp));
   1087   __ mov(sp, Operand(pending_handler_sp_address));
   1088   __ ldr(sp, MemOperand(sp));
   1089   __ mov(fp, Operand(pending_handler_fp_address));
   1090   __ ldr(fp, MemOperand(fp));
   1091 
    1092   // If the handler is a JS frame, restore the context to the frame. Note that
    1093   // cp will be 0 for non-JS frames, in which case the store below is skipped.
   1094   __ cmp(cp, Operand(0));
   1095   __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
   1096 
   1097   // Compute the handler entry address and jump to it.
   1098   ConstantPoolUnavailableScope constant_pool_unavailable(masm);
   1099   __ mov(r1, Operand(pending_handler_code_address));
   1100   __ ldr(r1, MemOperand(r1));
   1101   __ mov(r2, Operand(pending_handler_offset_address));
   1102   __ ldr(r2, MemOperand(r2));
   1103   __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
   1104   if (FLAG_enable_embedded_constant_pool) {
   1105     __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
   1106   }
   1107   __ add(pc, r1, r2);
   1108 }
   1109 
   1110 
   1111 void JSEntryStub::Generate(MacroAssembler* masm) {
   1112   // r0: code entry
   1113   // r1: function
   1114   // r2: receiver
   1115   // r3: argc
   1116   // [sp+0]: argv
   1117 
   1118   Label invoke, handler_entry, exit;
   1119 
   1120   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   1121 
   1122   // Called from C, so do not pop argc and args on exit (preserve sp)
   1123   // No need to save register-passed args
   1124   // Save callee-saved registers (incl. cp and fp), sp, and lr
   1125   __ stm(db_w, sp, kCalleeSaved | lr.bit());
   1126 
   1127   // Save callee-saved vfp registers.
   1128   __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   1129   // Set up the reserved register for 0.0.
   1130   __ vmov(kDoubleRegZero, 0.0);
   1131 
   1132   // Get address of argv, see stm above.
   1133   // r0: code entry
   1134   // r1: function
   1135   // r2: receiver
   1136   // r3: argc
   1137 
   1138   // Set up argv in r4.
   1139   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   1140   offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
   1141   __ ldr(r4, MemOperand(sp, offset_to_argv));
   1142 
    1143   // Push a frame with special values set up to mark it as an entry frame.
   1144   // r0: code entry
   1145   // r1: function
   1146   // r2: receiver
   1147   // r3: argc
   1148   // r4: argv
   1149   int marker = type();
   1150   if (FLAG_enable_embedded_constant_pool) {
   1151     __ mov(r8, Operand::Zero());
   1152   }
   1153   __ mov(r7, Operand(Smi::FromInt(marker)));
   1154   __ mov(r6, Operand(Smi::FromInt(marker)));
   1155   __ mov(r5,
   1156          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1157   __ ldr(r5, MemOperand(r5));
   1158   __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   1159   __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
   1160                        (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
   1161                        ip.bit());
   1162 
   1163   // Set up frame pointer for the frame to be pushed.
   1164   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
   1165 
   1166   // If this is the outermost JS call, set js_entry_sp value.
   1167   Label non_outermost_js;
   1168   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
   1169   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   1170   __ ldr(r6, MemOperand(r5));
   1171   __ cmp(r6, Operand::Zero());
   1172   __ b(ne, &non_outermost_js);
   1173   __ str(fp, MemOperand(r5));
   1174   __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   1175   Label cont;
   1176   __ b(&cont);
   1177   __ bind(&non_outermost_js);
   1178   __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   1179   __ bind(&cont);
   1180   __ push(ip);
   1181 
   1182   // Jump to a faked try block that does the invoke, with a faked catch
   1183   // block that sets the pending exception.
   1184   __ jmp(&invoke);
   1185 
   1186   // Block literal pool emission whilst taking the position of the handler
   1187   // entry. This avoids making the assumption that literal pools are always
   1188   // emitted after an instruction is emitted, rather than before.
   1189   {
   1190     Assembler::BlockConstPoolScope block_const_pool(masm);
   1191     __ bind(&handler_entry);
   1192     handler_offset_ = handler_entry.pos();
   1193     // Caught exception: Store result (exception) in the pending exception
   1194     // field in the JSEnv and return a failure sentinel.  Coming in here the
   1195     // fp will be invalid because the PushStackHandler below sets it to 0 to
   1196     // signal the existence of the JSEntry frame.
   1197     __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1198                                          isolate())));
   1199   }
   1200   __ str(r0, MemOperand(ip));
   1201   __ LoadRoot(r0, Heap::kExceptionRootIndex);
   1202   __ b(&exit);
   1203 
   1204   // Invoke: Link this frame into the handler chain.
   1205   __ bind(&invoke);
   1206   // Must preserve r0-r4, r5-r6 are available.
   1207   __ PushStackHandler();
   1208   // If an exception not caught by another handler occurs, this handler
   1209   // returns control to the code after the bl(&invoke) above, which
   1210   // restores all kCalleeSaved registers (including cp and fp) to their
   1211   // saved values before returning a failure to C.
   1212 
   1213   // Clear any pending exceptions.
   1214   __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
   1215   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1216                                        isolate())));
   1217   __ str(r5, MemOperand(ip));
   1218 
   1219   // Invoke the function by calling through JS entry trampoline builtin.
   1220   // Notice that we cannot store a reference to the trampoline code directly in
   1221   // this stub, because runtime stubs are not traversed when doing GC.
   1222 
   1223   // Expected registers by Builtins::JSEntryTrampoline
   1224   // r0: code entry
   1225   // r1: function
   1226   // r2: receiver
   1227   // r3: argc
   1228   // r4: argv
   1229   if (type() == StackFrame::ENTRY_CONSTRUCT) {
   1230     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
   1231                                       isolate());
   1232     __ mov(ip, Operand(construct_entry));
   1233   } else {
   1234     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
   1235     __ mov(ip, Operand(entry));
   1236   }
   1237   __ ldr(ip, MemOperand(ip));  // deref address
   1238   __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   1239 
   1240   // Branch and link to JSEntryTrampoline.
   1241   __ Call(ip);
   1242 
   1243   // Unlink this frame from the handler chain.
   1244   __ PopStackHandler();
   1245 
   1246   __ bind(&exit);  // r0 holds result
   1247   // Check if the current stack frame is marked as the outermost JS frame.
   1248   Label non_outermost_js_2;
   1249   __ pop(r5);
   1250   __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   1251   __ b(ne, &non_outermost_js_2);
   1252   __ mov(r6, Operand::Zero());
   1253   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
   1254   __ str(r6, MemOperand(r5));
   1255   __ bind(&non_outermost_js_2);
   1256 
   1257   // Restore the top frame descriptors from the stack.
   1258   __ pop(r3);
   1259   __ mov(ip,
   1260          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1261   __ str(r3, MemOperand(ip));
   1262 
   1263   // Reset the stack to the callee saved registers.
   1264   __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
   1265 
   1266   // Restore callee-saved registers and return.
   1267 #ifdef DEBUG
   1268   if (FLAG_debug_code) {
   1269     __ mov(lr, Operand(pc));
   1270   }
   1271 #endif
   1272 
   1273   // Restore callee-saved vfp registers.
   1274   __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   1275 
   1276   __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
   1277 }
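
// Illustrative sketch (not emitted by this stub): a minimal plain-C++ model of
// the bookkeeping described above, namely pushing/popping a stack handler and
// clearing the pending-exception slot with a hole sentinel.  The names below
// (SketchStackHandler, SketchThreadState) are hypothetical and do not mirror
// V8's real StackHandler or Isolate layout.
namespace {

struct SketchStackHandler {
  SketchStackHandler* next;  // the previously pushed handler
};

struct SketchThreadState {
  SketchStackHandler* handler_chain = nullptr;  // top of the handler chain
  const void* pending_exception = nullptr;      // nullptr stands in for the hole

  // Link a new handler at the front of the chain (cf. PushStackHandler above).
  void PushHandler(SketchStackHandler* handler) {
    handler->next = handler_chain;
    handler_chain = handler;
  }
  // Unlink the most recently pushed handler (cf. PopStackHandler above).
  void PopHandler() { handler_chain = handler_chain->next; }
  // Clearing the pending exception corresponds to storing the hole value.
  void ClearPendingException() { pending_exception = nullptr; }
};

}  // namespace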
   1278 
   1279 
   1280 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   1281   Label miss;
   1282   Register receiver = LoadDescriptor::ReceiverRegister();
   1283   // Ensure that the vector and slot registers won't be clobbered before
   1284   // calling the miss handler.
   1285   DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
   1286                      LoadWithVectorDescriptor::SlotRegister()));
   1287 
   1288   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
   1289                                                           r5, &miss);
   1290   __ bind(&miss);
   1291   PropertyAccessCompiler::TailCallBuiltin(
   1292       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
   1293 }
   1294 
   1295 
   1296 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   1297   // Return address is in lr.
   1298   Label miss;
   1299 
   1300   Register receiver = LoadDescriptor::ReceiverRegister();
   1301   Register index = LoadDescriptor::NameRegister();
   1302   Register scratch = r5;
   1303   Register result = r0;
   1304   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   1305   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
   1306          result.is(LoadWithVectorDescriptor::SlotRegister()));
   1307 
   1308   // StringCharAtGenerator doesn't use the result register until it's passed
   1309   // the different miss possibilities. If it did, we would have a conflict
   1310   // when FLAG_vector_ics is true.
   1311   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
   1312                                           &miss,  // When not a string.
   1313                                           &miss,  // When not a number.
   1314                                           &miss,  // When index out of range.
   1315                                           RECEIVER_IS_STRING);
   1316   char_at_generator.GenerateFast(masm);
   1317   __ Ret();
   1318 
   1319   StubRuntimeCallHelper call_helper;
   1320   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
   1321 
   1322   __ bind(&miss);
   1323   PropertyAccessCompiler::TailCallBuiltin(
   1324       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
   1325 }
   1326 
   1327 
   1328 void RegExpExecStub::Generate(MacroAssembler* masm) {
   1329   // Just jump directly to runtime if native RegExp is not selected at compile
   1330   // time or if regexp entry in generated code is turned off runtime switch or
   1331   // at compilation.
   1332 #ifdef V8_INTERPRETED_REGEXP
   1333   __ TailCallRuntime(Runtime::kRegExpExec);
   1334 #else  // V8_INTERPRETED_REGEXP
   1335 
   1336   // Stack frame on entry.
   1337   //  sp[0]: last_match_info (expected JSArray)
   1338   //  sp[4]: previous index
   1339   //  sp[8]: subject string
   1340   //  sp[12]: JSRegExp object
   1341 
   1342   const int kLastMatchInfoOffset = 0 * kPointerSize;
   1343   const int kPreviousIndexOffset = 1 * kPointerSize;
   1344   const int kSubjectOffset = 2 * kPointerSize;
   1345   const int kJSRegExpOffset = 3 * kPointerSize;
   1346 
   1347   Label runtime;
   1348   // Allocation of registers for this function. These are in callee save
   1349   // registers and will be preserved by the call to the native RegExp code, as
   1350   // this code is called using the normal C calling convention. When calling
    1351   // directly from generated code, the native RegExp code will not do a GC and
    1352   // therefore the contents of these registers are safe to use after the call.
   1353   Register subject = r4;
   1354   Register regexp_data = r5;
   1355   Register last_match_info_elements = no_reg;  // will be r6;
   1356 
   1357   // Ensure that a RegExp stack is allocated.
   1358   ExternalReference address_of_regexp_stack_memory_address =
   1359       ExternalReference::address_of_regexp_stack_memory_address(isolate());
   1360   ExternalReference address_of_regexp_stack_memory_size =
   1361       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   1362   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
   1363   __ ldr(r0, MemOperand(r0, 0));
   1364   __ cmp(r0, Operand::Zero());
   1365   __ b(eq, &runtime);
   1366 
   1367   // Check that the first argument is a JSRegExp object.
   1368   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
   1369   __ JumpIfSmi(r0, &runtime);
   1370   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   1371   __ b(ne, &runtime);
   1372 
   1373   // Check that the RegExp has been compiled (data contains a fixed array).
   1374   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
   1375   if (FLAG_debug_code) {
   1376     __ SmiTst(regexp_data);
   1377     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   1378     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
   1379     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   1380   }
   1381 
   1382   // regexp_data: RegExp data (FixedArray)
   1383   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   1384   __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   1385   __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
   1386   __ b(ne, &runtime);
   1387 
   1388   // regexp_data: RegExp data (FixedArray)
    1389   // Check that the number of captures fits in the static offsets vector buffer.
   1390   __ ldr(r2,
   1391          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   1392   // Check (number_of_captures + 1) * 2 <= offsets vector size
   1393   // Or          number_of_captures * 2 <= offsets vector size - 2
   1394   // Multiplying by 2 comes for free since r2 is smi-tagged.
   1395   STATIC_ASSERT(kSmiTag == 0);
   1396   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   1397   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   1398   __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
   1399   __ b(hi, &runtime);
   1400 
   1401   // Reset offset for possibly sliced string.
   1402   __ mov(r9, Operand::Zero());
   1403   __ ldr(subject, MemOperand(sp, kSubjectOffset));
   1404   __ JumpIfSmi(subject, &runtime);
   1405   __ mov(r3, subject);  // Make a copy of the original subject string.
   1406   // subject: subject string
   1407   // r3: subject string
   1408   // regexp_data: RegExp data (FixedArray)
   1409   // Handle subject string according to its encoding and representation:
   1410   // (1) Sequential string?  If yes, go to (4).
   1411   // (2) Sequential or cons?  If not, go to (5).
   1412   // (3) Cons string.  If the string is flat, replace subject with first string
   1413   //     and go to (1). Otherwise bail out to runtime.
   1414   // (4) Sequential string.  Load regexp code according to encoding.
   1415   // (E) Carry on.
   1416   /// [...]
   1417 
   1418   // Deferred code at the end of the stub:
   1419   // (5) Long external string?  If not, go to (7).
   1420   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1421   //     Go to (4).
   1422   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1423   // (8) Sliced string.  Replace subject with parent.  Go to (1).
   1424 
   1425   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
   1426       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
   1427 
   1428   __ bind(&check_underlying);
   1429   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   1430   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   1431 
   1432   // (1) Sequential string?  If yes, go to (4).
   1433   __ and_(r1,
   1434           r0,
   1435           Operand(kIsNotStringMask |
   1436                   kStringRepresentationMask |
   1437                   kShortExternalStringMask),
   1438           SetCC);
   1439   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   1440   __ b(eq, &seq_string);  // Go to (4).
   1441 
   1442   // (2) Sequential or cons?  If not, go to (5).
   1443   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   1444   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   1445   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   1446   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   1447   __ cmp(r1, Operand(kExternalStringTag));
   1448   __ b(ge, &not_seq_nor_cons);  // Go to (5).
   1449 
   1450   // (3) Cons string.  Check that it's flat.
   1451   // Replace subject with first string and reload instance type.
   1452   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
   1453   __ CompareRoot(r0, Heap::kempty_stringRootIndex);
   1454   __ b(ne, &runtime);
   1455   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   1456   __ jmp(&check_underlying);
   1457 
   1458   // (4) Sequential string.  Load regexp code according to encoding.
   1459   __ bind(&seq_string);
   1460   // subject: sequential subject string (or look-alike, external string)
   1461   // r3: original subject string
   1462   // Load previous index and check range before r3 is overwritten.  We have to
   1463   // use r3 instead of subject here because subject might have been only made
   1464   // to look like a sequential string when it actually is an external string.
   1465   __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
   1466   __ JumpIfNotSmi(r1, &runtime);
   1467   __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
   1468   __ cmp(r3, Operand(r1));
   1469   __ b(ls, &runtime);
   1470   __ SmiUntag(r1);
   1471 
   1472   STATIC_ASSERT(4 == kOneByteStringTag);
   1473   STATIC_ASSERT(kTwoByteStringTag == 0);
   1474   __ and_(r0, r0, Operand(kStringEncodingMask));
   1475   __ mov(r3, Operand(r0, ASR, 2), SetCC);
   1476   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
   1477          ne);
   1478   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
   1479 
   1480   // (E) Carry on.  String handling is done.
   1481   // r6: irregexp code
   1482   // Check that the irregexp code has been generated for the actual string
    1483   // encoding. If it has, the field contains a code object; otherwise it contains
   1484   // a smi (code flushing support).
   1485   __ JumpIfSmi(r6, &runtime);
   1486 
   1487   // r1: previous index
   1488   // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
   1489   // r6: code
   1490   // subject: Subject string
   1491   // regexp_data: RegExp data (FixedArray)
   1492   // All checks done. Now push arguments for native regexp code.
   1493   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
   1494 
   1495   // Isolates: note we add an additional parameter here (isolate pointer).
   1496   const int kRegExpExecuteArguments = 9;
   1497   const int kParameterRegisters = 4;
   1498   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
   1499 
   1500   // Stack pointer now points to cell where return address is to be written.
   1501   // Arguments are before that on the stack or in registers.
   1502 
   1503   // Argument 9 (sp[20]): Pass current isolate address.
   1504   __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
   1505   __ str(r0, MemOperand(sp, 5 * kPointerSize));
   1506 
   1507   // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
   1508   __ mov(r0, Operand(1));
   1509   __ str(r0, MemOperand(sp, 4 * kPointerSize));
   1510 
   1511   // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
   1512   __ mov(r0, Operand(address_of_regexp_stack_memory_address));
   1513   __ ldr(r0, MemOperand(r0, 0));
   1514   __ mov(r2, Operand(address_of_regexp_stack_memory_size));
   1515   __ ldr(r2, MemOperand(r2, 0));
   1516   __ add(r0, r0, Operand(r2));
   1517   __ str(r0, MemOperand(sp, 3 * kPointerSize));
   1518 
   1519   // Argument 6: Set the number of capture registers to zero to force global
   1520   // regexps to behave as non-global.  This does not affect non-global regexps.
   1521   __ mov(r0, Operand::Zero());
   1522   __ str(r0, MemOperand(sp, 2 * kPointerSize));
   1523 
   1524   // Argument 5 (sp[4]): static offsets vector buffer.
   1525   __ mov(r0,
   1526          Operand(ExternalReference::address_of_static_offsets_vector(
   1527              isolate())));
   1528   __ str(r0, MemOperand(sp, 1 * kPointerSize));
   1529 
   1530   // For arguments 4 and 3 get string length, calculate start of string data and
   1531   // calculate the shift of the index (0 for one-byte and 1 for two-byte).
   1532   __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   1533   __ eor(r3, r3, Operand(1));
   1534   // Load the length from the original subject string from the previous stack
   1535   // frame. Therefore we have to use fp, which points exactly to two pointer
   1536   // sizes below the previous sp. (Because creating a new stack frame pushes
   1537   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
   1538   __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   1539   // If slice offset is not 0, load the length from the original sliced string.
   1540   // Argument 4, r3: End of string data
   1541   // Argument 3, r2: Start of string data
   1542   // Prepare start and end index of the input.
   1543   __ add(r9, r7, Operand(r9, LSL, r3));
   1544   __ add(r2, r9, Operand(r1, LSL, r3));
   1545 
   1546   __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
   1547   __ SmiUntag(r7);
   1548   __ add(r3, r9, Operand(r7, LSL, r3));
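  // In plain terms, with char_size == (1 << shift) (1 for one-byte, 2 for
  // two-byte): r9 now holds data_start + slice_offset * char_size, so
  // argument 3 (r2) is r9 + previous_index * char_size and argument 4 (r3)
  // is r9 + subject_length * char_size, where subject_length is the length of
  // the original subject string reloaded from the caller's frame above.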
   1549 
   1550   // Argument 2 (r1): Previous index.
   1551   // Already there
   1552 
   1553   // Argument 1 (r0): Subject string.
   1554   __ mov(r0, subject);
   1555 
   1556   // Locate the code entry and call it.
   1557   __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
   1558   DirectCEntryStub stub(isolate());
   1559   stub.GenerateCall(masm, r6);
   1560 
   1561   __ LeaveExitFrame(false, no_reg, true);
   1562 
   1563   last_match_info_elements = r6;
   1564 
   1565   // r0: result
   1566   // subject: subject string (callee saved)
   1567   // regexp_data: RegExp data (callee saved)
   1568   // last_match_info_elements: Last match info elements (callee saved)
   1569   // Check the result.
   1570   Label success;
   1571   __ cmp(r0, Operand(1));
   1572   // We expect exactly one result since we force the called regexp to behave
   1573   // as non-global.
   1574   __ b(eq, &success);
   1575   Label failure;
   1576   __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
   1577   __ b(eq, &failure);
   1578   __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   1579   // If not exception it can only be retry. Handle that in the runtime system.
   1580   __ b(ne, &runtime);
    1581   // Result must now be exception. If there is no pending exception, a stack
    1582   // overflow (on the backtrack stack) was detected in the RegExp code but the
    1583   // exception has not been created yet. Handle that in the runtime system.
   1584   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   1585   __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
   1586   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1587                                        isolate())));
   1588   __ ldr(r0, MemOperand(r2, 0));
   1589   __ cmp(r0, r1);
   1590   __ b(eq, &runtime);
   1591 
   1592   // For exception, throw the exception again.
   1593   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
   1594 
   1595   __ bind(&failure);
   1596   // For failure and exception return null.
   1597   __ mov(r0, Operand(isolate()->factory()->null_value()));
   1598   __ add(sp, sp, Operand(4 * kPointerSize));
   1599   __ Ret();
   1600 
   1601   // Process the result from the native regexp code.
   1602   __ bind(&success);
   1603   __ ldr(r1,
   1604          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   1605   // Calculate number of capture registers (number_of_captures + 1) * 2.
   1606   // Multiplying by 2 comes for free since r1 is smi-tagged.
   1607   STATIC_ASSERT(kSmiTag == 0);
   1608   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   1609   __ add(r1, r1, Operand(2));  // r1 was a smi.
   1610 
   1611   __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
   1612   __ JumpIfSmi(r0, &runtime);
   1613   __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
   1614   __ b(ne, &runtime);
   1615   // Check that the JSArray is in fast case.
   1616   __ ldr(last_match_info_elements,
   1617          FieldMemOperand(r0, JSArray::kElementsOffset));
   1618   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   1619   __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
   1620   __ b(ne, &runtime);
   1621   // Check that the last match info has space for the capture registers and the
   1622   // additional information.
   1623   __ ldr(r0,
   1624          FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   1625   __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
   1626   __ cmp(r2, Operand::SmiUntag(r0));
   1627   __ b(gt, &runtime);
   1628 
   1629   // r1: number of capture registers
   1630   // r4: subject string
   1631   // Store the capture count.
   1632   __ SmiTag(r2, r1);
   1633   __ str(r2, FieldMemOperand(last_match_info_elements,
   1634                              RegExpImpl::kLastCaptureCountOffset));
   1635   // Store last subject and last input.
   1636   __ str(subject,
   1637          FieldMemOperand(last_match_info_elements,
   1638                          RegExpImpl::kLastSubjectOffset));
   1639   __ mov(r2, subject);
   1640   __ RecordWriteField(last_match_info_elements,
   1641                       RegExpImpl::kLastSubjectOffset,
   1642                       subject,
   1643                       r3,
   1644                       kLRHasNotBeenSaved,
   1645                       kDontSaveFPRegs);
   1646   __ mov(subject, r2);
   1647   __ str(subject,
   1648          FieldMemOperand(last_match_info_elements,
   1649                          RegExpImpl::kLastInputOffset));
   1650   __ RecordWriteField(last_match_info_elements,
   1651                       RegExpImpl::kLastInputOffset,
   1652                       subject,
   1653                       r3,
   1654                       kLRHasNotBeenSaved,
   1655                       kDontSaveFPRegs);
   1656 
   1657   // Get the static offsets vector filled by the native regexp code.
   1658   ExternalReference address_of_static_offsets_vector =
   1659       ExternalReference::address_of_static_offsets_vector(isolate());
   1660   __ mov(r2, Operand(address_of_static_offsets_vector));
   1661 
   1662   // r1: number of capture registers
   1663   // r2: offsets vector
   1664   Label next_capture, done;
   1665   // Capture register counter starts from number of capture registers and
   1666   // counts down until wraping after zero.
   1667   __ add(r0,
   1668          last_match_info_elements,
   1669          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
   1670   __ bind(&next_capture);
   1671   __ sub(r1, r1, Operand(1), SetCC);
   1672   __ b(mi, &done);
   1673   // Read the value from the static offsets vector buffer.
   1674   __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
   1675   // Store the smi value in the last match info.
   1676   __ SmiTag(r3);
   1677   __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
   1678   __ jmp(&next_capture);
   1679   __ bind(&done);
   1680 
   1681   // Return last match info.
   1682   __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
   1683   __ add(sp, sp, Operand(4 * kPointerSize));
   1684   __ Ret();
   1685 
   1686   // Do the runtime call to execute the regexp.
   1687   __ bind(&runtime);
   1688   __ TailCallRuntime(Runtime::kRegExpExec);
   1689 
   1690   // Deferred code for string handling.
   1691   // (5) Long external string?  If not, go to (7).
   1692   __ bind(&not_seq_nor_cons);
   1693   // Compare flags are still set.
   1694   __ b(gt, &not_long_external);  // Go to (7).
   1695 
   1696   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1697   __ bind(&external_string);
   1698   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   1699   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   1700   if (FLAG_debug_code) {
   1701     // Assert that we do not have a cons or slice (indirect strings) here.
   1702     // Sequential strings have already been ruled out.
   1703     __ tst(r0, Operand(kIsIndirectStringMask));
   1704     __ Assert(eq, kExternalStringExpectedButNotFound);
   1705   }
   1706   __ ldr(subject,
   1707          FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   1708   // Move the pointer so that offset-wise, it looks like a sequential string.
   1709   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   1710   __ sub(subject,
   1711          subject,
   1712          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   1713   __ jmp(&seq_string);  // Go to (4).
   1714 
   1715   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1716   __ bind(&not_long_external);
    1717   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   1718   __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
   1719   __ b(ne, &runtime);
   1720 
    1721   // (8) Sliced string.  Replace subject with parent.  Go to (1).
   1722   // Load offset into r9 and replace subject string with parent.
   1723   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   1724   __ SmiUntag(r9);
   1725   __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
    1726   __ jmp(&check_underlying);  // Go to (1).
   1727 #endif  // V8_INTERPRETED_REGEXP
   1728 }
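
// Illustrative sketch (plain C++, not emitted by this stub) of the capture
// bookkeeping above.  The parameter static_offsets_vector_size stands in for
// Isolate::kJSRegexpStaticOffsetsVectorSize, and match_info_captures is a
// simplified view of the capture area of the last-match-info FixedArray.
namespace {

// (number_of_captures + 1) * 2 <= vector size  is the same as
//      number_of_captures * 2 <= vector size - 2,
// and on 32-bit ARM a smi-tagged capture count is already the value times two,
// so the stub can compare the tagged count directly against (size - 2).
bool SketchCapturesFitInStaticVector(int number_of_captures,
                                     int static_offsets_vector_size) {
  return number_of_captures * 2 <= static_offsets_vector_size - 2;
}

// The success path copies (number_of_captures + 1) * 2 offsets from the static
// offsets vector into the match info, smi-tagging each value, just like the
// next_capture loop above.
void SketchStoreCaptures(const int* static_offsets, int number_of_captures,
                         int* match_info_captures) {
  int capture_register_count = (number_of_captures + 1) * 2;
  for (int i = 0; i < capture_register_count; i++) {
    match_info_captures[i] = static_offsets[i] << 1;  // SmiTag on 32-bit ARM
  }
}

}  // namespace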
   1729 
   1730 
   1731 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
   1732   // r0 : number of arguments to the construct function
   1733   // r1 : the function to call
   1734   // r2 : feedback vector
   1735   // r3 : slot in feedback vector (Smi)
   1736   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   1737 
   1738   // Number-of-arguments register must be smi-tagged to call out.
   1739   __ SmiTag(r0);
   1740   __ Push(r3, r2, r1, r0);
   1741 
   1742   __ CallStub(stub);
   1743 
   1744   __ Pop(r3, r2, r1, r0);
   1745   __ SmiUntag(r0);
   1746 }
   1747 
   1748 
   1749 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   1750   // Cache the called function in a feedback vector slot.  Cache states
   1751   // are uninitialized, monomorphic (indicated by a JSFunction), and
   1752   // megamorphic.
   1753   // r0 : number of arguments to the construct function
   1754   // r1 : the function to call
   1755   // r2 : feedback vector
   1756   // r3 : slot in feedback vector (Smi)
   1757   Label initialize, done, miss, megamorphic, not_array_function;
   1758   Label done_initialize_count, done_increment_count;
   1759 
   1760   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
   1761             masm->isolate()->heap()->megamorphic_symbol());
   1762   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
   1763             masm->isolate()->heap()->uninitialized_symbol());
   1764 
   1765   // Load the cache state into r5.
   1766   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
   1767   __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
   1768 
   1769   // A monomorphic cache hit or an already megamorphic state: invoke the
   1770   // function without changing the state.
   1771   // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
   1772   // this position in a symbol (see static asserts in type-feedback-vector.h).
   1773   Label check_allocation_site;
   1774   Register feedback_map = r6;
   1775   Register weak_value = r9;
   1776   __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
   1777   __ cmp(r1, weak_value);
   1778   __ b(eq, &done_increment_count);
   1779   __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
   1780   __ b(eq, &done);
   1781   __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
   1782   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
   1783   __ b(ne, &check_allocation_site);
   1784 
   1785   // If the weak cell is cleared, we have a new chance to become monomorphic.
   1786   __ JumpIfSmi(weak_value, &initialize);
   1787   __ jmp(&megamorphic);
   1788 
   1789   __ bind(&check_allocation_site);
   1790   // If we came here, we need to see if we are the array function.
    1791   // If we didn't have a matching function, and we didn't find the megamorphic
   1792   // sentinel, then we have in the slot either some other function or an
   1793   // AllocationSite.
   1794   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
   1795   __ b(ne, &miss);
   1796 
   1797   // Make sure the function is the Array() function
   1798   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
   1799   __ cmp(r1, r5);
   1800   __ b(ne, &megamorphic);
   1801   __ jmp(&done_increment_count);
   1802 
   1803   __ bind(&miss);
   1804 
    1805   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
   1806   // megamorphic.
   1807   __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
   1808   __ b(eq, &initialize);
   1809   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   1810   // write-barrier is needed.
   1811   __ bind(&megamorphic);
   1812   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
   1813   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
   1814   __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
   1815   __ jmp(&done);
   1816 
   1817   // An uninitialized cache is patched with the function
   1818   __ bind(&initialize);
   1819 
   1820   // Make sure the function is the Array() function
   1821   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
   1822   __ cmp(r1, r5);
   1823   __ b(ne, &not_array_function);
   1824 
    1825   // The target function is the Array constructor.
    1826   // Create an AllocationSite if we don't already have it and store it in the
   1827   // slot.
   1828   CreateAllocationSiteStub create_stub(masm->isolate());
   1829   CallStubInRecordCallTarget(masm, &create_stub);
   1830   __ b(&done_initialize_count);
   1831 
   1832   __ bind(&not_array_function);
   1833   CreateWeakCellStub weak_cell_stub(masm->isolate());
   1834   CallStubInRecordCallTarget(masm, &weak_cell_stub);
   1835 
   1836   __ bind(&done_initialize_count);
   1837   // Initialize the call counter.
   1838   __ Move(r5, Operand(Smi::FromInt(1)));
   1839   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
   1840   __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
   1841   __ b(&done);
   1842 
   1843   __ bind(&done_increment_count);
   1844 
   1845   // Increment the call count for monomorphic function calls.
   1846   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
   1847   __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
   1848   __ ldr(r4, FieldMemOperand(r5, 0));
   1849   __ add(r4, r4, Operand(Smi::FromInt(1)));
   1850   __ str(r4, FieldMemOperand(r5, 0));
   1851 
   1852   __ bind(&done);
   1853 }
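
// Illustrative sketch (plain C++, not emitted code) of the feedback-slot state
// machine that GenerateRecordCallTarget implements.  The enum and transition
// function are hypothetical simplifications: the real slot holds a WeakCell, an
// AllocationSite, or one of the two sentinel symbols rather than an enum, and a
// cleared WeakCell gives the slot a fresh chance to become monomorphic.
namespace {

enum class SketchFeedbackState {
  kUninitialized,  // uninitialized_symbol sentinel
  kMonomorphic,    // WeakCell holding the target, or an AllocationSite
  kMegamorphic     // megamorphic_symbol sentinel
};

// A hit on the recorded target keeps the current state; recording a target in
// an uninitialized slot makes it monomorphic; a miss on a monomorphic slot
// makes it megamorphic, and megamorphic is terminal.
SketchFeedbackState SketchNextState(SketchFeedbackState state,
                                    bool target_matches) {
  if (target_matches) return state;
  switch (state) {
    case SketchFeedbackState::kUninitialized:
      return SketchFeedbackState::kMonomorphic;
    case SketchFeedbackState::kMonomorphic:
    case SketchFeedbackState::kMegamorphic:
      return SketchFeedbackState::kMegamorphic;
  }
  return state;
}

}  // namespace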
   1854 
   1855 
   1856 void CallConstructStub::Generate(MacroAssembler* masm) {
   1857   // r0 : number of arguments
   1858   // r1 : the function to call
   1859   // r2 : feedback vector
   1860   // r3 : slot in feedback vector (Smi, for RecordCallTarget)
   1861 
   1862   Label non_function;
   1863   // Check that the function is not a smi.
   1864   __ JumpIfSmi(r1, &non_function);
   1865   // Check that the function is a JSFunction.
   1866   __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
   1867   __ b(ne, &non_function);
   1868 
   1869   GenerateRecordCallTarget(masm);
   1870 
   1871   __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
   1872   Label feedback_register_initialized;
   1873   // Put the AllocationSite from the feedback vector into r2, or undefined.
   1874   __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
   1875   __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
   1876   __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
   1877   __ b(eq, &feedback_register_initialized);
   1878   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
   1879   __ bind(&feedback_register_initialized);
   1880 
   1881   __ AssertUndefinedOrAllocationSite(r2, r5);
   1882 
   1883   // Pass function as new target.
   1884   __ mov(r3, r1);
   1885 
   1886   // Tail call to the function-specific construct stub (still in the caller
   1887   // context at this point).
   1888   __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   1889   __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
   1890   __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
   1891 
   1892   __ bind(&non_function);
   1893   __ mov(r3, r1);
   1894   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   1895 }
   1896 
   1897 
   1898 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   1899   // r1 - function
   1900   // r3 - slot id
   1901   // r2 - vector
   1902   // r4 - allocation site (loaded from vector[slot])
   1903   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
   1904   __ cmp(r1, r5);
   1905   __ b(ne, miss);
   1906 
   1907   __ mov(r0, Operand(arg_count()));
   1908 
   1909   // Increment the call count for monomorphic function calls.
   1910   __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
   1911   __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
   1912   __ ldr(r3, FieldMemOperand(r2, 0));
   1913   __ add(r3, r3, Operand(Smi::FromInt(1)));
   1914   __ str(r3, FieldMemOperand(r2, 0));
   1915 
   1916   __ mov(r2, r4);
   1917   __ mov(r3, r1);
   1918   ArrayConstructorStub stub(masm->isolate(), arg_count());
   1919   __ TailCallStub(&stub);
   1920 }
   1921 
   1922 
   1923 void CallICStub::Generate(MacroAssembler* masm) {
   1924   // r1 - function
   1925   // r3 - slot id (Smi)
   1926   // r2 - vector
   1927   Label extra_checks_or_miss, call, call_function;
   1928   int argc = arg_count();
   1929   ParameterCount actual(argc);
   1930 
   1931   // The checks. First, does r1 match the recorded monomorphic target?
   1932   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
   1933   __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
   1934 
   1935   // We don't know that we have a weak cell. We might have a private symbol
   1936   // or an AllocationSite, but the memory is safe to examine.
   1937   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
   1938   // FixedArray.
   1939   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
   1940   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
   1941   // computed, meaning that it can't appear to be a pointer. If the low bit is
   1942   // 0, then hash is computed, but the 0 bit prevents the field from appearing
   1943   // to be a pointer.
   1944   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
   1945   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
   1946                     WeakCell::kValueOffset &&
   1947                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
   1948 
   1949   __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
   1950   __ cmp(r1, r5);
   1951   __ b(ne, &extra_checks_or_miss);
   1952 
   1953   // The compare above could have been a SMI/SMI comparison. Guard against this
   1954   // convincing us that we have a monomorphic JSFunction.
   1955   __ JumpIfSmi(r1, &extra_checks_or_miss);
   1956 
   1957   // Increment the call count for monomorphic function calls.
   1958   __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
   1959   __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
   1960   __ ldr(r3, FieldMemOperand(r2, 0));
   1961   __ add(r3, r3, Operand(Smi::FromInt(1)));
   1962   __ str(r3, FieldMemOperand(r2, 0));
   1963 
   1964   __ bind(&call_function);
   1965   __ mov(r0, Operand(argc));
   1966   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
   1967                                                     tail_call_mode()),
   1968           RelocInfo::CODE_TARGET);
   1969 
   1970   __ bind(&extra_checks_or_miss);
   1971   Label uninitialized, miss, not_allocation_site;
   1972 
   1973   __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
   1974   __ b(eq, &call);
   1975 
   1976   // Verify that r4 contains an AllocationSite
   1977   __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
   1978   __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
   1979   __ b(ne, &not_allocation_site);
   1980 
   1981   // We have an allocation site.
   1982   HandleArrayCase(masm, &miss);
   1983 
   1984   __ bind(&not_allocation_site);
   1985 
   1986   // The following cases attempt to handle MISS cases without going to the
   1987   // runtime.
   1988   if (FLAG_trace_ic) {
   1989     __ jmp(&miss);
   1990   }
   1991 
   1992   __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
   1993   __ b(eq, &uninitialized);
   1994 
   1995   // We are going megamorphic. If the feedback is a JSFunction, it is fine
   1996   // to handle it here. More complex cases are dealt with in the runtime.
   1997   __ AssertNotSmi(r4);
   1998   __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
   1999   __ b(ne, &miss);
   2000   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
   2001   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
   2002   __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
   2003 
   2004   __ bind(&call);
   2005   __ mov(r0, Operand(argc));
   2006   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
   2007           RelocInfo::CODE_TARGET);
   2008 
   2009   __ bind(&uninitialized);
   2010 
   2011   // We are going monomorphic, provided we actually have a JSFunction.
   2012   __ JumpIfSmi(r1, &miss);
   2013 
    2014   // Go to the miss case if we do not have a function.
   2015   __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
   2016   __ b(ne, &miss);
   2017 
   2018   // Make sure the function is not the Array() function, which requires special
   2019   // behavior on MISS.
   2020   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
   2021   __ cmp(r1, r4);
   2022   __ b(eq, &miss);
   2023 
   2024   // Make sure the function belongs to the same native context.
   2025   __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
   2026   __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
   2027   __ ldr(ip, NativeContextMemOperand());
   2028   __ cmp(r4, ip);
   2029   __ b(ne, &miss);
   2030 
   2031   // Initialize the call counter.
   2032   __ Move(r5, Operand(Smi::FromInt(1)));
   2033   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
   2034   __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
   2035 
   2036   // Store the function. Use a stub since we need a frame for allocation.
   2037   // r2 - vector
   2038   // r3 - slot
   2039   // r1 - function
   2040   {
   2041     FrameScope scope(masm, StackFrame::INTERNAL);
   2042     CreateWeakCellStub create_stub(masm->isolate());
   2043     __ Push(r1);
   2044     __ CallStub(&create_stub);
   2045     __ Pop(r1);
   2046   }
   2047 
   2048   __ jmp(&call_function);
   2049 
   2050   // We are here because tracing is on or we encountered a MISS case we can't
   2051   // handle here.
   2052   __ bind(&miss);
   2053   GenerateMiss(masm);
   2054 
   2055   __ jmp(&call);
   2056 }
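
// Illustrative sketch (plain C++) of the call-count update used above.  On
// 32-bit ARM a smi stores the value v as (v << 1) with a zero tag bit, and the
// count lives one pointer past the feedback slot (FixedArray::kHeaderSize +
// kPointerSize), so bumping it by Smi::FromInt(1) is just adding 2 to the raw
// tagged word.
namespace {

int SketchSmiTag(int value) { return value << 1; }  // kSmiTag == 0, shift of 1

// (a << 1) + (1 << 1) == (a + 1) << 1, so no untagging is needed.
int SketchIncrementCallCount(int tagged_count) {
  return tagged_count + SketchSmiTag(1);
}

}  // namespace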
   2057 
   2058 
   2059 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   2060   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2061 
    2062   // Push the function and feedback info.
   2063   __ Push(r1, r2, r3);
   2064 
   2065   // Call the entry.
   2066   __ CallRuntime(Runtime::kCallIC_Miss);
   2067 
    2068   // Move the result to r1 and exit the internal frame.
   2069   __ mov(r1, r0);
   2070 }
   2071 
   2072 
   2073 // StringCharCodeAtGenerator
   2074 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   2075   // If the receiver is a smi trigger the non-string case.
   2076   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
   2077     __ JumpIfSmi(object_, receiver_not_string_);
   2078 
   2079     // Fetch the instance type of the receiver into result register.
   2080     __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2081     __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2082     // If the receiver is not a string trigger the non-string case.
   2083     __ tst(result_, Operand(kIsNotStringMask));
   2084     __ b(ne, receiver_not_string_);
   2085   }
   2086 
   2087   // If the index is non-smi trigger the non-smi case.
   2088   __ JumpIfNotSmi(index_, &index_not_smi_);
   2089   __ bind(&got_smi_index_);
   2090 
   2091   // Check for index out of range.
   2092   __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
   2093   __ cmp(ip, Operand(index_));
   2094   __ b(ls, index_out_of_range_);
   2095 
   2096   __ SmiUntag(index_);
   2097 
   2098   StringCharLoadGenerator::Generate(masm,
   2099                                     object_,
   2100                                     index_,
   2101                                     result_,
   2102                                     &call_runtime_);
   2103 
   2104   __ SmiTag(result_);
   2105   __ bind(&exit_);
   2106 }
   2107 
   2108 
   2109 void StringCharCodeAtGenerator::GenerateSlow(
   2110     MacroAssembler* masm, EmbedMode embed_mode,
   2111     const RuntimeCallHelper& call_helper) {
   2112   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   2113 
   2114   // Index is not a smi.
   2115   __ bind(&index_not_smi_);
   2116   // If index is a heap number, try converting it to an integer.
   2117   __ CheckMap(index_,
   2118               result_,
   2119               Heap::kHeapNumberMapRootIndex,
   2120               index_not_number_,
   2121               DONT_DO_SMI_CHECK);
   2122   call_helper.BeforeCall(masm);
   2123   if (embed_mode == PART_OF_IC_HANDLER) {
   2124     __ Push(LoadWithVectorDescriptor::VectorRegister(),
   2125             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   2126   } else {
   2127     // index_ is consumed by runtime conversion function.
   2128     __ Push(object_, index_);
   2129   }
   2130   __ CallRuntime(Runtime::kNumberToSmi);
   2131   // Save the conversion result before the pop instructions below
   2132   // have a chance to overwrite it.
   2133   __ Move(index_, r0);
   2134   if (embed_mode == PART_OF_IC_HANDLER) {
   2135     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
   2136            LoadWithVectorDescriptor::SlotRegister(), object_);
   2137   } else {
   2138     __ pop(object_);
   2139   }
   2140   // Reload the instance type.
   2141   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2142   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2143   call_helper.AfterCall(masm);
   2144   // If index is still not a smi, it must be out of range.
   2145   __ JumpIfNotSmi(index_, index_out_of_range_);
   2146   // Otherwise, return to the fast path.
   2147   __ jmp(&got_smi_index_);
   2148 
   2149   // Call runtime. We get here when the receiver is a string and the
    2150   // index is a number, but the code for getting the actual character
   2151   // is too complex (e.g., when the string needs to be flattened).
   2152   __ bind(&call_runtime_);
   2153   call_helper.BeforeCall(masm);
   2154   __ SmiTag(index_);
   2155   __ Push(object_, index_);
   2156   __ CallRuntime(Runtime::kStringCharCodeAtRT);
   2157   __ Move(result_, r0);
   2158   call_helper.AfterCall(masm);
   2159   __ jmp(&exit_);
   2160 
   2161   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   2162 }
   2163 
   2164 
   2165 // -------------------------------------------------------------------------
   2166 // StringCharFromCodeGenerator
   2167 
   2168 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   2169   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   2170   STATIC_ASSERT(kSmiTag == 0);
   2171   STATIC_ASSERT(kSmiShiftSize == 0);
   2172   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
   2173   __ tst(code_, Operand(kSmiTagMask |
   2174                         ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
   2175   __ b(ne, &slow_case_);
   2176 
   2177   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   2178   // At this point code register contains smi tagged one-byte char code.
   2179   __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
   2180   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   2181   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
   2182   __ b(eq, &slow_case_);
   2183   __ bind(&exit_);
   2184 }
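
// Illustrative sketch (plain C++) of the single tst performed above: it checks
// "code_ is a smi" and "char code <= String::kMaxOneByteCharCode" at once.
// The constants below assume kSmiTagMask == 1, kSmiTagSize == 1 and a one-byte
// maximum char code of 0xFF, matching the 32-bit ARM configuration.
namespace {

bool SketchIsOneByteCharCodeSmi(unsigned word) {
  const unsigned kMaxOneByteCharCode = 0xFF;
  const unsigned mask = 1u | (~kMaxOneByteCharCode << 1);
  // Equivalent to: (word & 1) == 0 && (word >> 1) <= kMaxOneByteCharCode.
  return (word & mask) == 0;
}

}  // namespace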
   2185 
   2186 
   2187 void StringCharFromCodeGenerator::GenerateSlow(
   2188     MacroAssembler* masm,
   2189     const RuntimeCallHelper& call_helper) {
   2190   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
   2191 
   2192   __ bind(&slow_case_);
   2193   call_helper.BeforeCall(masm);
   2194   __ push(code_);
   2195   __ CallRuntime(Runtime::kStringCharFromCode);
   2196   __ Move(result_, r0);
   2197   call_helper.AfterCall(masm);
   2198   __ jmp(&exit_);
   2199 
   2200   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
   2201 }
   2202 
   2203 
   2204 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
   2205 
   2206 
   2207 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
   2208                                           Register dest,
   2209                                           Register src,
   2210                                           Register count,
   2211                                           Register scratch,
   2212                                           String::Encoding encoding) {
   2213   if (FLAG_debug_code) {
   2214     // Check that destination is word aligned.
   2215     __ tst(dest, Operand(kPointerAlignmentMask));
   2216     __ Check(eq, kDestinationOfCopyNotAligned);
   2217   }
   2218 
   2219   // Assumes word reads and writes are little endian.
   2220   // Nothing to do for zero characters.
   2221   Label done;
   2222   if (encoding == String::TWO_BYTE_ENCODING) {
   2223     __ add(count, count, Operand(count), SetCC);
   2224   }
   2225 
   2226   Register limit = count;  // Read until dest equals this.
   2227   __ add(limit, dest, Operand(count));
   2228 
   2229   Label loop_entry, loop;
   2230   // Copy bytes from src to dest until dest hits limit.
   2231   __ b(&loop_entry);
   2232   __ bind(&loop);
   2233   __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
   2234   __ strb(scratch, MemOperand(dest, 1, PostIndex));
   2235   __ bind(&loop_entry);
   2236   __ cmp(dest, Operand(limit));
   2237   __ b(lt, &loop);
   2238 
   2239   __ bind(&done);
   2240 }
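
// Illustrative sketch (plain C++) of what GenerateCopyCharacters emits: for
// two-byte strings the character count is doubled up front so the same
// byte-wise loop serves both encodings, copying until dest reaches the limit.
namespace {

void SketchCopyCharacters(unsigned char* dest, const unsigned char* src,
                          int char_count, bool two_byte) {
  int byte_count = two_byte ? char_count * 2 : char_count;  // cf. add(count, count, count)
  unsigned char* limit = dest + byte_count;                 // read until dest equals this
  while (dest < limit) {
    *dest++ = *src++;  // cf. the post-indexed ldrb/strb loop
  }
}

}  // namespace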
   2241 
   2242 
   2243 void SubStringStub::Generate(MacroAssembler* masm) {
   2244   Label runtime;
   2245 
   2246   // Stack frame on entry.
   2247   //  lr: return address
   2248   //  sp[0]: to
   2249   //  sp[4]: from
   2250   //  sp[8]: string
   2251 
   2252   // This stub is called from the native-call %_SubString(...), so
   2253   // nothing can be assumed about the arguments. It is tested that:
   2254   //  "string" is a sequential string,
   2255   //  both "from" and "to" are smis, and
   2256   //  0 <= from <= to <= string.length.
   2257   // If any of these assumptions fail, we call the runtime system.
   2258 
   2259   const int kToOffset = 0 * kPointerSize;
   2260   const int kFromOffset = 1 * kPointerSize;
   2261   const int kStringOffset = 2 * kPointerSize;
   2262 
   2263   __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
   2264   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   2265   STATIC_ASSERT(kSmiTag == 0);
   2266   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   2267 
   2268   // Arithmetic shift right by one un-smi-tags. In this case we rotate right
   2269   // instead because we bail out on non-smi values: ROR and ASR are equivalent
   2270   // for smis but they set the flags in a way that's easier to optimize.
   2271   __ mov(r2, Operand(r2, ROR, 1), SetCC);
   2272   __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
   2273   // If either to or from had the smi tag bit set, then C is set now, and N
   2274   // has the same value: we rotated by 1, so the bottom bit is now the top bit.
    2275   // We want to bail out to runtime here if From is negative.  In that case, the
   2276   // next instruction is not executed and we fall through to bailing out to
   2277   // runtime.
   2278   // Executed if both r2 and r3 are untagged integers.
   2279   __ sub(r2, r2, Operand(r3), SetCC, cc);
   2280   // One of the above un-smis or the above SUB could have set N==1.
   2281   __ b(mi, &runtime);  // Either "from" or "to" is not an smi, or from > to.
   2282 
   2283   // Make sure first argument is a string.
   2284   __ ldr(r0, MemOperand(sp, kStringOffset));
   2285   __ JumpIfSmi(r0, &runtime);
   2286   Condition is_string = masm->IsObjectStringType(r0, r1);
   2287   __ b(NegateCondition(is_string), &runtime);
   2288 
   2289   Label single_char;
   2290   __ cmp(r2, Operand(1));
   2291   __ b(eq, &single_char);
   2292 
   2293   // Short-cut for the case of trivial substring.
   2294   Label return_r0;
   2295   // r0: original string
   2296   // r2: result string length
   2297   __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
   2298   __ cmp(r2, Operand(r4, ASR, 1));
   2299   // Return original string.
   2300   __ b(eq, &return_r0);
   2301   // Longer than original string's length or negative: unsafe arguments.
   2302   __ b(hi, &runtime);
   2303   // Shorter than original string's length: an actual substring.
   2304 
   2305   // Deal with different string types: update the index if necessary
   2306   // and put the underlying string into r5.
   2307   // r0: original string
   2308   // r1: instance type
   2309   // r2: length
   2310   // r3: from index (untagged)
   2311   Label underlying_unpacked, sliced_string, seq_or_external_string;
   2312   // If the string is not indirect, it can only be sequential or external.
   2313   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
   2314   STATIC_ASSERT(kIsIndirectStringMask != 0);
   2315   __ tst(r1, Operand(kIsIndirectStringMask));
   2316   __ b(eq, &seq_or_external_string);
   2317 
   2318   __ tst(r1, Operand(kSlicedNotConsMask));
   2319   __ b(ne, &sliced_string);
   2320   // Cons string.  Check whether it is flat, then fetch first part.
   2321   __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
   2322   __ CompareRoot(r5, Heap::kempty_stringRootIndex);
   2323   __ b(ne, &runtime);
   2324   __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
   2325   // Update instance type.
   2326   __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
   2327   __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
   2328   __ jmp(&underlying_unpacked);
   2329 
   2330   __ bind(&sliced_string);
   2331   // Sliced string.  Fetch parent and correct start index by offset.
   2332   __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
   2333   __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
   2334   __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
   2335   // Update instance type.
   2336   __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
   2337   __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
   2338   __ jmp(&underlying_unpacked);
   2339 
   2340   __ bind(&seq_or_external_string);
   2341   // Sequential or external string.  Just move string to the expected register.
   2342   __ mov(r5, r0);
   2343 
   2344   __ bind(&underlying_unpacked);
   2345 
   2346   if (FLAG_string_slices) {
   2347     Label copy_routine;
   2348     // r5: underlying subject string
   2349     // r1: instance type of underlying subject string
   2350     // r2: length
   2351     // r3: adjusted start index (untagged)
   2352     __ cmp(r2, Operand(SlicedString::kMinLength));
   2353     // Short slice.  Copy instead of slicing.
   2354     __ b(lt, &copy_routine);
   2355     // Allocate new sliced string.  At this point we do not reload the instance
   2356     // type including the string encoding because we simply rely on the info
   2357     // provided by the original string.  It does not matter if the original
   2358     // string's encoding is wrong because we always have to recheck encoding of
    2359   // the newly created string's parent anyway due to externalized strings.
   2360     Label two_byte_slice, set_slice_header;
   2361     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   2362     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   2363     __ tst(r1, Operand(kStringEncodingMask));
   2364     __ b(eq, &two_byte_slice);
   2365     __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
   2366     __ jmp(&set_slice_header);
   2367     __ bind(&two_byte_slice);
   2368     __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
   2369     __ bind(&set_slice_header);
   2370     __ mov(r3, Operand(r3, LSL, 1));
   2371     __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
   2372     __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
   2373     __ jmp(&return_r0);
   2374 
   2375     __ bind(&copy_routine);
   2376   }
   2377 
   2378   // r5: underlying subject string
   2379   // r1: instance type of underlying subject string
   2380   // r2: length
   2381   // r3: adjusted start index (untagged)
   2382   Label two_byte_sequential, sequential_string, allocate_result;
   2383   STATIC_ASSERT(kExternalStringTag != 0);
   2384   STATIC_ASSERT(kSeqStringTag == 0);
   2385   __ tst(r1, Operand(kExternalStringTag));
   2386   __ b(eq, &sequential_string);
   2387 
   2388   // Handle external string.
   2389   // Rule out short external strings.
   2390   STATIC_ASSERT(kShortExternalStringTag != 0);
   2391   __ tst(r1, Operand(kShortExternalStringTag));
   2392   __ b(ne, &runtime);
   2393   __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
   2394   // r5 already points to the first character of underlying string.
   2395   __ jmp(&allocate_result);
   2396 
   2397   __ bind(&sequential_string);
   2398   // Locate first character of underlying subject string.
   2399   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   2400   __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   2401 
   2402   __ bind(&allocate_result);
    2403   // Allocate the result string.
   2404   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   2405   __ tst(r1, Operand(kStringEncodingMask));
   2406   __ b(eq, &two_byte_sequential);
   2407 
   2408   // Allocate and copy the resulting one-byte string.
   2409   __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
   2410 
   2411   // Locate first character of substring to copy.
   2412   __ add(r5, r5, r3);
   2413   // Locate first character of result.
   2414   __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   2415 
   2416   // r0: result string
   2417   // r1: first character of result string
   2418   // r2: result string length
   2419   // r5: first character of substring to copy
   2420   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   2421   StringHelper::GenerateCopyCharacters(
   2422       masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
   2423   __ jmp(&return_r0);
   2424 
   2425   // Allocate and copy the resulting two-byte string.
   2426   __ bind(&two_byte_sequential);
   2427   __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
   2428 
   2429   // Locate first character of substring to copy.
   2430   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   2431   __ add(r5, r5, Operand(r3, LSL, 1));
   2432   // Locate first character of result.
   2433   __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   2434 
   2435   // r0: result string.
   2436   // r1: first character of result.
   2437   // r2: result length.
   2438   // r5: first character of substring to copy.
   2439   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   2440   StringHelper::GenerateCopyCharacters(
   2441       masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
   2442 
   2443   __ bind(&return_r0);
   2444   Counters* counters = isolate()->counters();
   2445   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   2446   __ Drop(3);
   2447   __ Ret();
   2448 
    2449   // Just jump to runtime to create the substring.
   2450   __ bind(&runtime);
   2451   __ TailCallRuntime(Runtime::kSubString);
   2452 
   2453   __ bind(&single_char);
   2454   // r0: original string
   2455   // r1: instance type
   2456   // r2: length
   2457   // r3: from index (untagged)
   2458   __ SmiTag(r3, r3);
   2459   StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
   2460                                   RECEIVER_IS_STRING);
   2461   generator.GenerateFast(masm);
   2462   __ Drop(3);
   2463   __ Ret();
   2464   generator.SkipSlow(masm, &runtime);
   2465 }
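
// Illustrative sketch (plain C++) of the ROR-by-1 trick used above for "to" and
// "from": rotating a 32-bit word right by one moves the smi tag bit into the
// sign bit, so a value whose tag bit is set comes out negative, which is one of
// the cases the single "mi" branch rejects.
namespace {

unsigned SketchRotateRight1(unsigned word) {
  return (word >> 1) | (word << 31);  // bit 0 (the smi tag) becomes bit 31
}

// True when the tag bit of the original word ends up in the sign position,
// i.e. the word was not a smi.
bool SketchTagBitBecameSignBit(unsigned word) {
  return (SketchRotateRight1(word) & 0x80000000u) != 0;
}

}  // namespace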
   2466 
   2467 void ToStringStub::Generate(MacroAssembler* masm) {
   2468   // The ToString stub takes one argument in r0.
   2469   Label is_number;
   2470   __ JumpIfSmi(r0, &is_number);
   2471 
   2472   __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
   2473   // r0: receiver
   2474   // r1: receiver instance type
   2475   __ Ret(lo);
   2476 
   2477   Label not_heap_number;
   2478   __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
   2479   __ b(ne, &not_heap_number);
   2480   __ bind(&is_number);
   2481   NumberToStringStub stub(isolate());
   2482   __ TailCallStub(&stub);
   2483   __ bind(&not_heap_number);
   2484 
   2485   Label not_oddball;
   2486   __ cmp(r1, Operand(ODDBALL_TYPE));
   2487   __ b(ne, &not_oddball);
   2488   __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
   2489   __ Ret();
   2490   __ bind(&not_oddball);
   2491 
   2492   __ push(r0);  // Push argument.
   2493   __ TailCallRuntime(Runtime::kToString);
   2494 }
   2495 
   2496 
   2497 void ToNameStub::Generate(MacroAssembler* masm) {
   2498   // The ToName stub takes one argument in r0.
   2499   Label is_number;
   2500   __ JumpIfSmi(r0, &is_number);
   2501 
   2502   STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
   2503   __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
   2504   // r0: receiver
   2505   // r1: receiver instance type
   2506   __ Ret(ls);
   2507 
   2508   Label not_heap_number;
   2509   __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
   2510   __ b(ne, &not_heap_number);
   2511   __ bind(&is_number);
   2512   NumberToStringStub stub(isolate());
   2513   __ TailCallStub(&stub);
   2514   __ bind(&not_heap_number);
   2515 
   2516   Label not_oddball;
   2517   __ cmp(r1, Operand(ODDBALL_TYPE));
   2518   __ b(ne, &not_oddball);
   2519   __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
   2520   __ Ret();
   2521   __ bind(&not_oddball);
   2522 
   2523   __ push(r0);  // Push argument.
   2524   __ TailCallRuntime(Runtime::kToName);
   2525 }
   2526 
   2527 
   2528 void StringHelper::GenerateFlatOneByteStringEquals(
   2529     MacroAssembler* masm, Register left, Register right, Register scratch1,
   2530     Register scratch2, Register scratch3) {
   2531   Register length = scratch1;
   2532 
   2533   // Compare lengths.
   2534   Label strings_not_equal, check_zero_length;
   2535   __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
   2536   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
   2537   __ cmp(length, scratch2);
   2538   __ b(eq, &check_zero_length);
   2539   __ bind(&strings_not_equal);
   2540   __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
   2541   __ Ret();
   2542 
   2543   // Check if the length is zero.
   2544   Label compare_chars;
   2545   __ bind(&check_zero_length);
   2546   STATIC_ASSERT(kSmiTag == 0);
   2547   __ cmp(length, Operand::Zero());
   2548   __ b(ne, &compare_chars);
   2549   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   2550   __ Ret();
   2551 
   2552   // Compare characters.
   2553   __ bind(&compare_chars);
   2554   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
   2555                                   &strings_not_equal);
   2556 
   2557   // Characters are equal.
   2558   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   2559   __ Ret();
   2560 }
   2561 
   2562 
   2563 void StringHelper::GenerateCompareFlatOneByteStrings(
   2564     MacroAssembler* masm, Register left, Register right, Register scratch1,
   2565     Register scratch2, Register scratch3, Register scratch4) {
   2566   Label result_not_equal, compare_lengths;
   2567   // Find minimum length and length difference.
   2568   __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
   2569   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
   2570   __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
   2571   Register length_delta = scratch3;
   2572   __ mov(scratch1, scratch2, LeaveCC, gt);
   2573   Register min_length = scratch1;
   2574   STATIC_ASSERT(kSmiTag == 0);
   2575   __ cmp(min_length, Operand::Zero());
   2576   __ b(eq, &compare_lengths);
   2577 
   2578   // Compare loop.
   2579   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
   2580                                   scratch4, &result_not_equal);
   2581 
   2582   // Compare lengths - strings up to min-length are equal.
   2583   __ bind(&compare_lengths);
   2584   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   2585   // Use length_delta as result if it's zero.
   2586   __ mov(r0, Operand(length_delta), SetCC);
   2587   __ bind(&result_not_equal);
   2588   // Conditionally update the result based either on length_delta or
   2589   // the last comparison performed in the loop above.
   2590   __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
   2591   __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
   2592   __ Ret();
   2593 }
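
        // The routine above is the usual flat-string ordering scheme: compare up to
        // min(length_left, length_right) characters and, if no character differs,
        // order by the length difference. A minimal C sketch (illustrative only):
        //
        //   int CompareFlatOneByte(const uint8_t* l, int len_l,
        //                          const uint8_t* r, int len_r) {
        //     int min_length = len_l < len_r ? len_l : len_r;
        //     for (int i = 0; i < min_length; i++) {
        //       if (l[i] != r[i]) return l[i] < r[i] ? LESS : GREATER;
        //     }
        //     int length_delta = len_l - len_r;
        //     return length_delta == 0 ? EQUAL
        //                              : (length_delta < 0 ? LESS : GREATER);
        //   }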
   2594 
   2595 
   2596 void StringHelper::GenerateOneByteCharsCompareLoop(
   2597     MacroAssembler* masm, Register left, Register right, Register length,
   2598     Register scratch1, Register scratch2, Label* chars_not_equal) {
   2599   // Change index to run from -length to -1 by adding length to string
   2600   // start. This means that the loop ends when index reaches zero, which
   2601   // doesn't need an additional compare.
   2602   __ SmiUntag(length);
   2603   __ add(scratch1, length,
   2604          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   2605   __ add(left, left, Operand(scratch1));
   2606   __ add(right, right, Operand(scratch1));
   2607   __ rsb(length, length, Operand::Zero());
   2608   Register index = length;  // index = -length;
   2609 
   2610   // Compare loop.
   2611   Label loop;
   2612   __ bind(&loop);
   2613   __ ldrb(scratch1, MemOperand(left, index));
   2614   __ ldrb(scratch2, MemOperand(right, index));
   2615   __ cmp(scratch1, scratch2);
   2616   __ b(ne, chars_not_equal);
   2617   __ add(index, index, Operand(1), SetCC);
   2618   __ b(ne, &loop);
   2619 }
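
        // The loop above advances both string pointers past the last character and
        // runs the index from -length up to zero, so the SetCC on the increment
        // doubles as the loop-exit test and no separate compare is needed. Roughly
        // equivalent C (illustrative only):
        //
        //   const uint8_t* l_end = l_chars + length;  // one past the last character
        //   const uint8_t* r_end = r_chars + length;
        //   for (int index = -length; index != 0; index++) {
        //     if (l_end[index] != r_end[index]) goto chars_not_equal;
        //   }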
   2620 
   2621 
   2622 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   2623   // ----------- S t a t e -------------
   2624   //  -- r1    : left
   2625   //  -- r0    : right
   2626   //  -- lr    : return address
   2627   // -----------------------------------
   2628 
   2629   // Load r2 with the allocation site.  We stick an undefined dummy value here
   2630   // and replace it with the real allocation site later when we instantiate this
   2631   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   2632   __ Move(r2, isolate()->factory()->undefined_value());
   2633 
   2634   // Make sure that we actually patched the allocation site.
   2635   if (FLAG_debug_code) {
   2636     __ tst(r2, Operand(kSmiTagMask));
   2637     __ Assert(ne, kExpectedAllocationSite);
   2638     __ push(r2);
   2639     __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
   2640     __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
   2641     __ cmp(r2, ip);
   2642     __ pop(r2);
   2643     __ Assert(eq, kExpectedAllocationSite);
   2644   }
   2645 
   2646   // Tail call into the stub that handles binary operations with allocation
   2647   // sites.
   2648   BinaryOpWithAllocationSiteStub stub(isolate(), state());
   2649   __ TailCallStub(&stub);
   2650 }
   2651 
   2652 
   2653 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   2654   DCHECK_EQ(CompareICState::BOOLEAN, state());
   2655   Label miss;
   2656 
   2657   __ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2658   __ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2659   if (!Token::IsEqualityOp(op())) {
   2660     __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
   2661     __ AssertSmi(r1);
   2662     __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
   2663     __ AssertSmi(r0);
   2664   }
   2665   __ sub(r0, r1, r0);
   2666   __ Ret();
   2667 
   2668   __ bind(&miss);
   2669   GenerateMiss(masm);
   2670 }
   2671 
   2672 
   2673 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   2674   DCHECK(state() == CompareICState::SMI);
   2675   Label miss;
   2676   __ orr(r2, r1, r0);
   2677   __ JumpIfNotSmi(r2, &miss);
   2678 
   2679   if (GetCondition() == eq) {
   2680     // For equality we do not care about the sign of the result.
   2681     __ sub(r0, r0, r1, SetCC);
   2682   } else {
   2683     // Untag before subtracting to avoid handling overflow.
   2684     __ SmiUntag(r1);
   2685     __ sub(r0, r1, Operand::SmiUntag(r0));
   2686   }
   2687   __ Ret();
   2688 
   2689   __ bind(&miss);
   2690   GenerateMiss(masm);
   2691 }
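
        // Sketch of the Smi fast path above (illustrative only). For equality the
        // tagged values are subtracted directly (zero iff equal, sign irrelevant);
        // for ordered comparisons both sides are untagged first so the subtraction
        // cannot overflow:
        //
        //   if (equality)  result = right_tagged - left_tagged;    // 0 iff equal
        //   else           result = Untag(left) - Untag(right);    // sign gives order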
   2692 
   2693 
   2694 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   2695   DCHECK(state() == CompareICState::NUMBER);
   2696 
   2697   Label generic_stub;
   2698   Label unordered, maybe_undefined1, maybe_undefined2;
   2699   Label miss;
   2700 
   2701   if (left() == CompareICState::SMI) {
   2702     __ JumpIfNotSmi(r1, &miss);
   2703   }
   2704   if (right() == CompareICState::SMI) {
   2705     __ JumpIfNotSmi(r0, &miss);
   2706   }
   2707 
   2708   // Inline the double comparison and fall back to the general compare
   2709   // stub if NaN is involved.
   2710   // Load left and right operand.
   2711   Label done, left, left_smi, right_smi;
   2712   __ JumpIfSmi(r0, &right_smi);
   2713   __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
   2714               DONT_DO_SMI_CHECK);
   2715   __ sub(r2, r0, Operand(kHeapObjectTag));
   2716   __ vldr(d1, r2, HeapNumber::kValueOffset);
   2717   __ b(&left);
   2718   __ bind(&right_smi);
   2719   __ SmiToDouble(d1, r0);
   2720 
   2721   __ bind(&left);
   2722   __ JumpIfSmi(r1, &left_smi);
   2723   __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
   2724               DONT_DO_SMI_CHECK);
   2725   __ sub(r2, r1, Operand(kHeapObjectTag));
   2726   __ vldr(d0, r2, HeapNumber::kValueOffset);
   2727   __ b(&done);
   2728   __ bind(&left_smi);
   2729   __ SmiToDouble(d0, r1);
   2730 
   2731   __ bind(&done);
   2732   // Compare operands.
   2733   __ VFPCompareAndSetFlags(d0, d1);
   2734 
   2735   // Don't base result on status bits when a NaN is involved.
   2736   __ b(vs, &unordered);
   2737 
   2738   // Return a result of -1, 0, or 1, based on status bits.
   2739   __ mov(r0, Operand(EQUAL), LeaveCC, eq);
   2740   __ mov(r0, Operand(LESS), LeaveCC, lt);
   2741   __ mov(r0, Operand(GREATER), LeaveCC, gt);
   2742   __ Ret();
   2743 
   2744   __ bind(&unordered);
   2745   __ bind(&generic_stub);
   2746   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
   2747                      CompareICState::GENERIC, CompareICState::GENERIC);
   2748   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   2749 
   2750   __ bind(&maybe_undefined1);
   2751   if (Token::IsOrderedRelationalCompareOp(op())) {
   2752     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
   2753     __ b(ne, &miss);
   2754     __ JumpIfSmi(r1, &unordered);
   2755     __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
   2756     __ b(ne, &maybe_undefined2);
   2757     __ jmp(&unordered);
   2758   }
   2759 
   2760   __ bind(&maybe_undefined2);
   2761   if (Token::IsOrderedRelationalCompareOp(op())) {
   2762     __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
   2763     __ b(eq, &unordered);
   2764   }
   2765 
   2766   __ bind(&miss);
   2767   GenerateMiss(masm);
   2768 }
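
        // The inlined number comparison above is a standard three-way compare with
        // an explicit escape for NaN (the "unordered" VFP condition). Illustrative
        // sketch, with |left| in d0 and |right| in d1 as loaded above:
        //
        //   if (isnan(left) || isnan(right)) goto unordered;   // -> generic stub
        //   result = (left == right) ? EQUAL : (left < right ? LESS : GREATER);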
   2769 
   2770 
   2771 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   2772   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   2773   Label miss;
   2774 
   2775   // Registers containing left and right operands respectively.
   2776   Register left = r1;
   2777   Register right = r0;
   2778   Register tmp1 = r2;
   2779   Register tmp2 = r3;
   2780 
   2781   // Check that both operands are heap objects.
   2782   __ JumpIfEitherSmi(left, right, &miss);
   2783 
   2784   // Check that both operands are internalized strings.
   2785   __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2786   __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2787   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2788   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2789   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   2790   __ orr(tmp1, tmp1, Operand(tmp2));
   2791   __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   2792   __ b(ne, &miss);
   2793 
   2794   // Internalized strings are compared by identity.
   2795   __ cmp(left, right);
   2796   // Make sure r0 is non-zero. At this point input operands are
   2797   // guaranteed to be non-zero.
   2798   DCHECK(right.is(r0));
   2799   STATIC_ASSERT(EQUAL == 0);
   2800   STATIC_ASSERT(kSmiTag == 0);
   2801   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
   2802   __ Ret();
   2803 
   2804   __ bind(&miss);
   2805   GenerateMiss(masm);
   2806 }
   2807 
   2808 
   2809 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   2810   DCHECK(state() == CompareICState::UNIQUE_NAME);
   2811   DCHECK(GetCondition() == eq);
   2812   Label miss;
   2813 
   2814   // Registers containing left and right operands respectively.
   2815   Register left = r1;
   2816   Register right = r0;
   2817   Register tmp1 = r2;
   2818   Register tmp2 = r3;
   2819 
   2820   // Check that both operands are heap objects.
   2821   __ JumpIfEitherSmi(left, right, &miss);
   2822 
   2823   // Check that both operands are unique names. This leaves the instance
   2824   // types loaded in tmp1 and tmp2.
   2825   __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2826   __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2827   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2828   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2829 
   2830   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
   2831   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
   2832 
   2833   // Unique names are compared by identity.
   2834   __ cmp(left, right);
   2835   // Make sure r0 is non-zero. At this point input operands are
   2836   // guaranteed to be non-zero.
   2837   DCHECK(right.is(r0));
   2838   STATIC_ASSERT(EQUAL == 0);
   2839   STATIC_ASSERT(kSmiTag == 0);
   2840   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
   2841   __ Ret();
   2842 
   2843   __ bind(&miss);
   2844   GenerateMiss(masm);
   2845 }
   2846 
   2847 
   2848 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
   2849   DCHECK(state() == CompareICState::STRING);
   2850   Label miss;
   2851 
   2852   bool equality = Token::IsEqualityOp(op());
   2853 
   2854   // Registers containing left and right operands respectively.
   2855   Register left = r1;
   2856   Register right = r0;
   2857   Register tmp1 = r2;
   2858   Register tmp2 = r3;
   2859   Register tmp3 = r4;
   2860   Register tmp4 = r5;
   2861 
   2862   // Check that both operands are heap objects.
   2863   __ JumpIfEitherSmi(left, right, &miss);
   2864 
   2865   // Check that both operands are strings. This leaves the instance
   2866   // types loaded in tmp1 and tmp2.
   2867   __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2868   __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2869   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2870   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2871   STATIC_ASSERT(kNotStringTag != 0);
   2872   __ orr(tmp3, tmp1, tmp2);
   2873   __ tst(tmp3, Operand(kIsNotStringMask));
   2874   __ b(ne, &miss);
   2875 
   2876   // Fast check for identical strings.
   2877   __ cmp(left, right);
   2878   STATIC_ASSERT(EQUAL == 0);
   2879   STATIC_ASSERT(kSmiTag == 0);
   2880   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
   2881   __ Ret(eq);
   2882 
   2883   // Handle not identical strings.
   2884 
   2885   // Check that both strings are internalized strings. If they are, we're done
   2886   // because we already know they are not identical. We know they are both
   2887   // strings.
   2888   if (equality) {
   2889     DCHECK(GetCondition() == eq);
   2890     STATIC_ASSERT(kInternalizedTag == 0);
   2891     __ orr(tmp3, tmp1, Operand(tmp2));
   2892     __ tst(tmp3, Operand(kIsNotInternalizedMask));
   2893     // Make sure r0 is non-zero. At this point input operands are
   2894     // guaranteed to be non-zero.
   2895     DCHECK(right.is(r0));
   2896     __ Ret(eq);
   2897   }
   2898 
   2899   // Check that both strings are sequential one-byte.
   2900   Label runtime;
   2901   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
   2902                                                     &runtime);
   2903 
   2904   // Compare flat one-byte strings. Returns when done.
   2905   if (equality) {
   2906     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
   2907                                                   tmp3);
   2908   } else {
   2909     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
   2910                                                     tmp2, tmp3, tmp4);
   2911   }
   2912 
   2913   // Handle more complex cases in runtime.
   2914   __ bind(&runtime);
   2915   if (equality) {
   2916     {
   2917       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2918       __ Push(left, right);
   2919       __ CallRuntime(Runtime::kStringEqual);
   2920     }
   2921     __ LoadRoot(r1, Heap::kTrueValueRootIndex);
   2922     __ sub(r0, r0, r1);
   2923     __ Ret();
   2924   } else {
   2925     __ Push(left, right);
   2926     __ TailCallRuntime(Runtime::kStringCompare);
   2927   }
   2928 
   2929   __ bind(&miss);
   2930   GenerateMiss(masm);
   2931 }
   2932 
   2933 
   2934 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
   2935   DCHECK_EQ(CompareICState::RECEIVER, state());
   2936   Label miss;
   2937   __ and_(r2, r1, Operand(r0));
   2938   __ JumpIfSmi(r2, &miss);
   2939 
   2940   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   2941   __ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
   2942   __ b(lt, &miss);
   2943   __ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
   2944   __ b(lt, &miss);
   2945 
   2946   DCHECK(GetCondition() == eq);
   2947   __ sub(r0, r0, Operand(r1));
   2948   __ Ret();
   2949 
   2950   __ bind(&miss);
   2951   GenerateMiss(masm);
   2952 }
   2953 
   2954 
   2955 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   2956   Label miss;
   2957   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   2958   __ and_(r2, r1, Operand(r0));
   2959   __ JumpIfSmi(r2, &miss);
   2960   __ GetWeakValue(r4, cell);
   2961   __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
   2962   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
   2963   __ cmp(r2, r4);
   2964   __ b(ne, &miss);
   2965   __ cmp(r3, r4);
   2966   __ b(ne, &miss);
   2967 
   2968   if (Token::IsEqualityOp(op())) {
   2969     __ sub(r0, r0, Operand(r1));
   2970     __ Ret();
   2971   } else {
   2972     if (op() == Token::LT || op() == Token::LTE) {
   2973       __ mov(r2, Operand(Smi::FromInt(GREATER)));
   2974     } else {
   2975       __ mov(r2, Operand(Smi::FromInt(LESS)));
   2976     }
   2977     __ Push(r1, r0, r2);
   2978     __ TailCallRuntime(Runtime::kCompare);
   2979   }
   2980 
   2981   __ bind(&miss);
   2982   GenerateMiss(masm);
   2983 }
   2984 
   2985 
   2986 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   2987   {
   2988     // Call the runtime system in a fresh internal frame.
   2989     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2990     __ Push(r1, r0);
   2991     __ Push(lr, r1, r0);
   2992     __ mov(ip, Operand(Smi::FromInt(op())));
   2993     __ push(ip);
   2994     __ CallRuntime(Runtime::kCompareIC_Miss);
   2995     // Compute the entry point of the rewritten stub.
   2996     __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
   2997     // Restore registers.
   2998     __ pop(lr);
   2999     __ Pop(r1, r0);
   3000   }
   3001 
   3002   __ Jump(r2);
   3003 }
   3004 
   3005 
   3006 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   3007   // Place the return address on the stack, making the call
   3008   // GC safe. The RegExp backend also relies on this.
   3009   __ str(lr, MemOperand(sp, 0));
   3010   __ blx(ip);  // Call the C++ function.
   3011   __ ldr(pc, MemOperand(sp, 0));
   3012 }
   3013 
   3014 
   3015 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   3016                                     Register target) {
   3017   intptr_t code =
   3018       reinterpret_cast<intptr_t>(GetCode().location());
   3019   __ Move(ip, target);
   3020   __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
   3021   __ blx(lr);  // Call the stub.
   3022 }
   3023 
   3024 
   3025 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
   3026                                                       Label* miss,
   3027                                                       Label* done,
   3028                                                       Register receiver,
   3029                                                       Register properties,
   3030                                                       Handle<Name> name,
   3031                                                       Register scratch0) {
   3032   DCHECK(name->IsUniqueName());
   3033   // If the names of the slots in the range from 1 to kProbes - 1 for the
   3034   // hash value are not equal to the name and the kProbes-th slot is not used
   3035   // (its name is the undefined value), the hash table is guaranteed not to
   3036   // contain the property. This holds even if some slots represent deleted
   3037   // properties (their names are the hole value).
   3038   for (int i = 0; i < kInlinedProbes; i++) {
   3039     // scratch0 points to properties hash.
   3040     // Compute the masked index: (hash + i + i * i) & mask.
   3041     Register index = scratch0;
   3042     // Capacity is smi 2^n.
   3043     __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
   3044     __ sub(index, index, Operand(1));
   3045     __ and_(index, index, Operand(
   3046         Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
   3047 
   3048     // Scale the index by multiplying by the entry size.
   3049     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3050     __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
   3051 
   3052     Register entity_name = scratch0;
   3053     // Having undefined at this place means the name is not contained.
   3054     STATIC_ASSERT(kSmiTagSize == 1);
   3055     Register tmp = properties;
   3056     __ add(tmp, properties, Operand(index, LSL, 1));
   3057     __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   3058 
   3059     DCHECK(!tmp.is(entity_name));
   3060     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
   3061     __ cmp(entity_name, tmp);
   3062     __ b(eq, done);
   3063 
   3064     // Load the hole value, ready for use below.
   3065     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
   3066 
   3067     // Stop if found the property.
   3068     __ cmp(entity_name, Operand(Handle<Name>(name)));
   3069     __ b(eq, miss);
   3070 
   3071     Label good;
   3072     __ cmp(entity_name, tmp);
   3073     __ b(eq, &good);
   3074 
   3075     // Check if the entry name is not a unique name.
   3076     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   3077     __ ldrb(entity_name,
   3078             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   3079     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
   3080     __ bind(&good);
   3081 
   3082     // Restore the properties.
   3083     __ ldr(properties,
   3084            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   3085   }
   3086 
   3087   const int spill_mask =
   3088       (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
   3089        r2.bit() | r1.bit() | r0.bit());
   3090 
   3091   __ stm(db_w, sp, spill_mask);
   3092   __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   3093   __ mov(r1, Operand(Handle<Name>(name)));
   3094   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   3095   __ CallStub(&stub);
   3096   __ cmp(r0, Operand::Zero());
   3097   __ ldm(ia_w, sp, spill_mask);
   3098 
   3099   __ b(eq, done);
   3100   __ b(ne, miss);
   3101 }
   3102 
   3103 
   3104 // Probe the name dictionary in the |elements| register. Jump to the
   3105 // |done| label if a property with the given name is found. Jump to
   3106 // the |miss| label otherwise.
   3107 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
   3108 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
   3109                                                       Label* miss,
   3110                                                       Label* done,
   3111                                                       Register elements,
   3112                                                       Register name,
   3113                                                       Register scratch1,
   3114                                                       Register scratch2) {
   3115   DCHECK(!elements.is(scratch1));
   3116   DCHECK(!elements.is(scratch2));
   3117   DCHECK(!name.is(scratch1));
   3118   DCHECK(!name.is(scratch2));
   3119 
   3120   __ AssertName(name);
   3121 
   3122   // Compute the capacity mask.
   3123   __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
   3124   __ SmiUntag(scratch1);
   3125   __ sub(scratch1, scratch1, Operand(1));
   3126 
   3127   // Generate an unrolled loop that performs a few probes before
   3128   // giving up. Measurements done on Gmail indicate that 2 probes
   3129   // cover ~93% of loads from dictionaries.
   3130   for (int i = 0; i < kInlinedProbes; i++) {
   3131     // Compute the masked index: (hash + i + i * i) & mask.
   3132     __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
   3133     if (i > 0) {
   3134       // Add the probe offset (i + i * i) left shifted to avoid right shifting
   3135       // the hash in a separate instruction. The value hash + i + i * i is right
   3136       // shifted in the following 'and' instruction.
   3137       DCHECK(NameDictionary::GetProbeOffset(i) <
   3138              1 << (32 - Name::kHashFieldOffset));
   3139       __ add(scratch2, scratch2, Operand(
   3140           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   3141     }
   3142     __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
   3143 
   3144     // Scale the index by multiplying by the entry size.
   3145     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3146     // scratch2 = scratch2 * 3.
   3147     __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
   3148 
   3149     // Check if the key is identical to the name.
   3150     __ add(scratch2, elements, Operand(scratch2, LSL, 2));
   3151     __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
   3152     __ cmp(name, Operand(ip));
   3153     __ b(eq, done);
   3154   }
   3155 
   3156   const int spill_mask =
   3157       (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
   3158        r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
   3159       ~(scratch1.bit() | scratch2.bit());
   3160 
   3161   __ stm(db_w, sp, spill_mask);
   3162   if (name.is(r0)) {
   3163     DCHECK(!elements.is(r1));
   3164     __ Move(r1, name);
   3165     __ Move(r0, elements);
   3166   } else {
   3167     __ Move(r0, elements);
   3168     __ Move(r1, name);
   3169   }
   3170   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
   3171   __ CallStub(&stub);
   3172   __ cmp(r0, Operand::Zero());
   3173   __ mov(scratch2, Operand(r2));
   3174   __ ldm(ia_w, sp, spill_mask);
   3175 
   3176   __ b(ne, done);
   3177   __ b(eq, miss);
   3178 }
   3179 
   3180 
   3181 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   3182   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   3183   // we cannot call anything that could cause a GC from this stub.
   3184   // Registers:
   3185   //  result: the lookup result (aliases dictionary, r0).
   3186   //  key: the name to look up (r1).
   3187   //  dictionary: the NameDictionary to probe (r0).
   3188   //  index: will hold the index of the entry if the lookup is successful;
   3189   //         may alias with result.
   3190   // Returns:
   3191   //  result is zero if the lookup failed, non-zero otherwise.
   3192 
   3193   Register result = r0;
   3194   Register dictionary = r0;
   3195   Register key = r1;
   3196   Register index = r2;
   3197   Register mask = r3;
   3198   Register hash = r4;
   3199   Register undefined = r5;
   3200   Register entry_key = r6;
   3201 
   3202   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   3203 
   3204   __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
   3205   __ SmiUntag(mask);
   3206   __ sub(mask, mask, Operand(1));
   3207 
   3208   __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
   3209 
   3210   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   3211 
   3212   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   3213     // Compute the masked index: (hash + i + i * i) & mask.
   3214     // Capacity is smi 2^n.
   3215     if (i > 0) {
   3216       // Add the probe offset (i + i * i) left shifted to avoid right shifting
   3217       // the hash in a separate instruction. The value hash + i + i * i is right
   3218       // shifted in the following 'and' instruction.
   3219       DCHECK(NameDictionary::GetProbeOffset(i) <
   3220              1 << (32 - Name::kHashFieldOffset));
   3221       __ add(index, hash, Operand(
   3222           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   3223     } else {
   3224       __ mov(index, Operand(hash));
   3225     }
   3226     __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
   3227 
   3228     // Scale the index by multiplying by the entry size.
   3229     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3230     __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
   3231 
   3232     STATIC_ASSERT(kSmiTagSize == 1);
   3233     __ add(index, dictionary, Operand(index, LSL, 2));
   3234     __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
   3235 
   3236     // Having undefined at this place means the name is not contained.
   3237     __ cmp(entry_key, Operand(undefined));
   3238     __ b(eq, &not_in_dictionary);
   3239 
   3240     // Stop if found the property.
   3241     __ cmp(entry_key, Operand(key));
   3242     __ b(eq, &in_dictionary);
   3243 
   3244     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
   3245       // Check if the entry name is not a unique name.
   3246       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   3247       __ ldrb(entry_key,
   3248               FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   3249       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
   3250     }
   3251   }
   3252 
   3253   __ bind(&maybe_in_dictionary);
   3254   // If we are doing negative lookup then probing failure should be
   3255   // treated as a lookup success. For positive lookup probing failure
   3256   // should be treated as lookup failure.
   3257   if (mode() == POSITIVE_LOOKUP) {
   3258     __ mov(result, Operand::Zero());
   3259     __ Ret();
   3260   }
   3261 
   3262   __ bind(&in_dictionary);
   3263   __ mov(result, Operand(1));
   3264   __ Ret();
   3265 
   3266   __ bind(&not_in_dictionary);
   3267   __ mov(result, Operand::Zero());
   3268   __ Ret();
   3269 }
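
        // Per the "(hash + i + i * i) & mask" comments above, the dictionary is an
        // open-addressed table with quadratic probing and three fields per entry
        // (NameDictionary::kEntrySize == 3), the key being the first field. A
        // compact C-style sketch of the lookup performed by this stub together with
        // the inlined probes above (illustrative only; names are hypothetical):
        //
        //   int Lookup(Object** elements, int capacity, uint32_t hash, Name* key) {
        //     uint32_t mask = capacity - 1;                    // capacity is 2^n
        //     for (int i = 0; i < kTotalProbes; i++) {
        //       uint32_t index = (hash + i + i * i) & mask;    // masked index
        //       Object* entry_key = elements[index * 3];       // kEntrySize == 3
        //       if (entry_key == undefined) return kNotFound;  // free slot
        //       if (entry_key == key) return index;            // found
        //     }
        //     return kNotFound;   // corresponds to maybe_in_dictionary above
        //   }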
   3270 
   3271 
   3272 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
   3273     Isolate* isolate) {
   3274   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
   3275   stub1.GetCode();
   3276   // Hydrogen code stubs need stub2 at snapshot time.
   3277   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
   3278   stub2.GetCode();
   3279 }
   3280 
   3281 
   3282 // Takes the input in 3 registers: address_ value_ and object_.  A pointer to
   3283 // the value has just been written into the object, now this stub makes sure
   3284 // we keep the GC informed.  The word in the object where the value has been
   3285 // written is in the address register.
   3286 void RecordWriteStub::Generate(MacroAssembler* masm) {
   3287   Label skip_to_incremental_noncompacting;
   3288   Label skip_to_incremental_compacting;
   3289 
   3290   // The first two instructions are generated with labels so as to get the
   3291   // offset fixed up correctly by the bind(Label*) call.  We patch it back and
   3292   // forth between a compare instructions (a nop in this position) and the
   3293   // real branch when we start and stop incremental heap marking.
   3294   // See RecordWriteStub::Patch for details.
   3295   {
   3296     // Block literal pool emission, as the position of these two instructions
   3297     // is assumed by the patching code.
   3298     Assembler::BlockConstPoolScope block_const_pool(masm);
   3299     __ b(&skip_to_incremental_noncompacting);
   3300     __ b(&skip_to_incremental_compacting);
   3301   }
   3302 
   3303   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   3304     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   3305                            MacroAssembler::kReturnAtEnd);
   3306   }
   3307   __ Ret();
   3308 
   3309   __ bind(&skip_to_incremental_noncompacting);
   3310   GenerateIncremental(masm, INCREMENTAL);
   3311 
   3312   __ bind(&skip_to_incremental_compacting);
   3313   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
   3314 
   3315   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
   3316   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
   3317   DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
   3318   DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
   3319   PatchBranchIntoNop(masm, 0);
   3320   PatchBranchIntoNop(masm, Assembler::kInstrSize);
   3321 }
   3322 
   3323 
   3324 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   3325   regs_.Save(masm);
   3326 
   3327   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   3328     Label dont_need_remembered_set;
   3329 
   3330     __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
   3331     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
   3332                            regs_.scratch0(),
   3333                            &dont_need_remembered_set);
   3334 
   3335     __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
   3336                         &dont_need_remembered_set);
   3337 
   3338     // First notify the incremental marker if necessary, then update the
   3339     // remembered set.
   3340     CheckNeedsToInformIncrementalMarker(
   3341         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
   3342     InformIncrementalMarker(masm);
   3343     regs_.Restore(masm);
   3344     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   3345                            MacroAssembler::kReturnAtEnd);
   3346 
   3347     __ bind(&dont_need_remembered_set);
   3348   }
   3349 
   3350   CheckNeedsToInformIncrementalMarker(
   3351       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
   3352   InformIncrementalMarker(masm);
   3353   regs_.Restore(masm);
   3354   __ Ret();
   3355 }
   3356 
   3357 
   3358 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
   3359   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   3360   int argument_count = 3;
   3361   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   3362   Register address =
   3363       r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
   3364   DCHECK(!address.is(regs_.object()));
   3365   DCHECK(!address.is(r0));
   3366   __ Move(address, regs_.address());
   3367   __ Move(r0, regs_.object());
   3368   __ Move(r1, address);
   3369   __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
   3370 
   3371   AllowExternalCallThatCantCauseGC scope(masm);
   3372   __ CallCFunction(
   3373       ExternalReference::incremental_marking_record_write_function(isolate()),
   3374       argument_count);
   3375   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
   3376 }
   3377 
   3378 
   3379 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   3380     MacroAssembler* masm,
   3381     OnNoNeedToInformIncrementalMarker on_no_need,
   3382     Mode mode) {
   3383   Label on_black;
   3384   Label need_incremental;
   3385   Label need_incremental_pop_scratch;
   3386 
   3387   __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
   3388   __ ldr(regs_.scratch1(),
   3389          MemOperand(regs_.scratch0(),
   3390                     MemoryChunk::kWriteBarrierCounterOffset));
   3391   __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
   3392   __ str(regs_.scratch1(),
   3393          MemOperand(regs_.scratch0(),
   3394                     MemoryChunk::kWriteBarrierCounterOffset));
   3395   __ b(mi, &need_incremental);
   3396 
   3397   // Let's look at the color of the object:  If it is not black we don't have
   3398   // to inform the incremental marker.
   3399   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
   3400 
   3401   regs_.Restore(masm);
   3402   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   3403     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   3404                            MacroAssembler::kReturnAtEnd);
   3405   } else {
   3406     __ Ret();
   3407   }
   3408 
   3409   __ bind(&on_black);
   3410 
   3411   // Get the value from the slot.
   3412   __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
   3413 
   3414   if (mode == INCREMENTAL_COMPACTION) {
   3415     Label ensure_not_white;
   3416 
   3417     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
   3418                      regs_.scratch1(),  // Scratch.
   3419                      MemoryChunk::kEvacuationCandidateMask,
   3420                      eq,
   3421                      &ensure_not_white);
   3422 
   3423     __ CheckPageFlag(regs_.object(),
   3424                      regs_.scratch1(),  // Scratch.
   3425                      MemoryChunk::kSkipEvacuationSlotsRecordingMask,
   3426                      eq,
   3427                      &need_incremental);
   3428 
   3429     __ bind(&ensure_not_white);
   3430   }
   3431 
   3432   // We need extra registers for this, so we push the object and the address
   3433   // register temporarily.
   3434   __ Push(regs_.object(), regs_.address());
   3435   __ JumpIfWhite(regs_.scratch0(),  // The value.
   3436                  regs_.scratch1(),  // Scratch.
   3437                  regs_.object(),    // Scratch.
   3438                  regs_.address(),   // Scratch.
   3439                  &need_incremental_pop_scratch);
   3440   __ Pop(regs_.object(), regs_.address());
   3441 
   3442   regs_.Restore(masm);
   3443   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   3444     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   3445                            MacroAssembler::kReturnAtEnd);
   3446   } else {
   3447     __ Ret();
   3448   }
   3449 
   3450   __ bind(&need_incremental_pop_scratch);
   3451   __ Pop(regs_.object(), regs_.address());
   3452 
   3453   __ bind(&need_incremental);
   3454 
   3455   // Fall through when we need to inform the incremental marker.
   3456 }
   3457 
   3458 
   3459 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   3460   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   3461   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   3462   int parameter_count_offset =
   3463       StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   3464   __ ldr(r1, MemOperand(fp, parameter_count_offset));
   3465   if (function_mode() == JS_FUNCTION_STUB_MODE) {
   3466     __ add(r1, r1, Operand(1));
   3467   }
   3468   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   3469   __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
   3470   __ add(sp, sp, r1);
   3471   __ Ret();
   3472 }
   3473 
   3474 
   3475 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   3476   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   3477   LoadICStub stub(isolate());
   3478   stub.GenerateForTrampoline(masm);
   3479 }
   3480 
   3481 
   3482 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   3483   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   3484   KeyedLoadICStub stub(isolate());
   3485   stub.GenerateForTrampoline(masm);
   3486 }
   3487 
   3488 
   3489 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   3490   __ EmitLoadTypeFeedbackVector(r2);
   3491   CallICStub stub(isolate(), state());
   3492   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   3493 }
   3494 
   3495 
   3496 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
   3497 
   3498 
   3499 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3500   GenerateImpl(masm, true);
   3501 }
   3502 
   3503 
   3504 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
   3505                              Register receiver_map, Register scratch1,
   3506                              Register scratch2, bool is_polymorphic,
   3507                              Label* miss) {
   3508   // feedback initially contains the feedback array
   3509   Label next_loop, prepare_next;
   3510   Label start_polymorphic;
   3511 
   3512   Register cached_map = scratch1;
   3513 
   3514   __ ldr(cached_map,
   3515          FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
   3516   __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   3517   __ cmp(receiver_map, cached_map);
   3518   __ b(ne, &start_polymorphic);
   3519   // found, now call handler.
   3520   Register handler = feedback;
   3521   __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
   3522   __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   3523 
   3524 
   3525   Register length = scratch2;
   3526   __ bind(&start_polymorphic);
   3527   __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   3528   if (!is_polymorphic) {
   3529     // If the IC could be monomorphic we have to make sure we don't go past the
   3530     // end of the feedback array.
   3531     __ cmp(length, Operand(Smi::FromInt(2)));
   3532     __ b(eq, miss);
   3533   }
   3534 
   3535   Register too_far = length;
   3536   Register pointer_reg = feedback;
   3537 
   3538   // +-----+------+------+-----+-----+ ... ----+
   3539   // | map | len  | wm0  | h0  | wm1 |      hN |
   3540   // +-----+------+------+-----+-----+ ... ----+
   3541   //                 0      1     2        len-1
   3542   //                              ^              ^
   3543   //                              |              |
   3544   //                         pointer_reg      too_far
   3545   //                         aka feedback     scratch2
   3546   // also need receiver_map
   3547   // use cached_map (scratch1) to look in the weak map values.
   3548   __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
   3549   __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   3550   __ add(pointer_reg, feedback,
   3551          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
   3552 
   3553   __ bind(&next_loop);
   3554   __ ldr(cached_map, MemOperand(pointer_reg));
   3555   __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   3556   __ cmp(receiver_map, cached_map);
   3557   __ b(ne, &prepare_next);
   3558   __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
   3559   __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   3560 
   3561   __ bind(&prepare_next);
   3562   __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
   3563   __ cmp(pointer_reg, too_far);
   3564   __ b(lt, &next_loop);
   3565 
   3566   // We exhausted our array of map handler pairs.
   3567   __ jmp(miss);
   3568 }
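
        // The feedback array scanned above holds (weak map, handler) pairs after the
        // FixedArray header, as in the diagram. C-style sketch of the walk
        // (illustrative only; the assembly simply peels the first pair):
        //
        //   for (int i = 0; i < length; i += 2) {        // wm0/h0, wm1/h1, ...
        //     Object* cached = WeakCell::cast(elements[i])->value();
        //     if (cached == receiver_map) {
        //       return tail_call(elements[i + 1]);       // jump to the handler
        //     }
        //   }
        //   goto miss;                                   // exhausted all pairs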
   3569 
   3570 
   3571 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
   3572                                   Register receiver_map, Register feedback,
   3573                                   Register vector, Register slot,
   3574                                   Register scratch, Label* compare_map,
   3575                                   Label* load_smi_map, Label* try_array) {
   3576   __ JumpIfSmi(receiver, load_smi_map);
   3577   __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   3578   __ bind(compare_map);
   3579   Register cached_map = scratch;
   3580   // Move the weak map into the weak_cell register.
   3581   __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
   3582   __ cmp(cached_map, receiver_map);
   3583   __ b(ne, try_array);
   3584   Register handler = feedback;
   3585   __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
   3586   __ ldr(handler,
   3587          FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
   3588   __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   3589 }
   3590 
   3591 
   3592 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3593   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
   3594   Register name = LoadWithVectorDescriptor::NameRegister();          // r2
   3595   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
   3596   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
   3597   Register feedback = r4;
   3598   Register receiver_map = r5;
   3599   Register scratch1 = r6;
   3600 
   3601   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   3602   __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   3603 
   3604   // Try to quickly handle the monomorphic case without knowing for sure
   3605   // if we have a weak cell in feedback. We do know it's safe to look
   3606   // at WeakCell::kValueOffset.
   3607   Label try_array, load_smi_map, compare_map;
   3608   Label not_array, miss;
   3609   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3610                         scratch1, &compare_map, &load_smi_map, &try_array);
   3611 
   3612   // Is it a fixed array?
   3613   __ bind(&try_array);
   3614   __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3615   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   3616   __ b(ne, &not_array);
   3617   HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
   3618 
   3619   __ bind(&not_array);
   3620   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   3621   __ b(ne, &miss);
   3622   Code::Flags code_flags =
   3623       Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   3624   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
   3625                                                receiver, name, feedback,
   3626                                                receiver_map, scratch1, r9);
   3627 
   3628   __ bind(&miss);
   3629   LoadIC::GenerateMiss(masm);
   3630 
   3631   __ bind(&load_smi_map);
   3632   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3633   __ jmp(&compare_map);
   3634 }
   3635 
   3636 
   3637 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
   3638   GenerateImpl(masm, false);
   3639 }
   3640 
   3641 
   3642 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3643   GenerateImpl(masm, true);
   3644 }
   3645 
   3646 
   3647 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3648   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
   3649   Register key = LoadWithVectorDescriptor::NameRegister();           // r2
   3650   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
   3651   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
   3652   Register feedback = r4;
   3653   Register receiver_map = r5;
   3654   Register scratch1 = r6;
   3655 
   3656   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   3657   __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   3658 
   3659   // Try to quickly handle the monomorphic case without knowing for sure
   3660   // if we have a weak cell in feedback. We do know it's safe to look
   3661   // at WeakCell::kValueOffset.
   3662   Label try_array, load_smi_map, compare_map;
   3663   Label not_array, miss;
   3664   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3665                         scratch1, &compare_map, &load_smi_map, &try_array);
   3666 
   3667   __ bind(&try_array);
   3668   // Is it a fixed array?
   3669   __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3670   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   3671   __ b(ne, &not_array);
   3672 
   3673   // We have a polymorphic element handler.
   3674   Label polymorphic, try_poly_name;
   3675   __ bind(&polymorphic);
   3676   HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
   3677 
   3678   __ bind(&not_array);
   3679   // Is it generic?
   3680   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   3681   __ b(ne, &try_poly_name);
   3682   Handle<Code> megamorphic_stub =
   3683       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   3684   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   3685 
   3686   __ bind(&try_poly_name);
   3687   // We might have a name in feedback, and a fixed array in the next slot.
   3688   __ cmp(key, feedback);
   3689   __ b(ne, &miss);
   3690   // If the name comparison succeeded, we know we have a fixed array with
   3691   // at least one map/handler pair.
   3692   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   3693   __ ldr(feedback,
   3694          FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   3695   HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
   3696 
   3697   __ bind(&miss);
   3698   KeyedLoadIC::GenerateMiss(masm);
   3699 
   3700   __ bind(&load_smi_map);
   3701   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3702   __ jmp(&compare_map);
   3703 }
   3704 
   3705 
   3706 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   3707   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   3708   VectorStoreICStub stub(isolate(), state());
   3709   stub.GenerateForTrampoline(masm);
   3710 }
   3711 
   3712 
   3713 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   3714   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   3715   VectorKeyedStoreICStub stub(isolate(), state());
   3716   stub.GenerateForTrampoline(masm);
   3717 }
   3718 
   3719 
   3720 void VectorStoreICStub::Generate(MacroAssembler* masm) {
   3721   GenerateImpl(masm, false);
   3722 }
   3723 
   3724 
   3725 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3726   GenerateImpl(masm, true);
   3727 }
   3728 
   3729 
   3730 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3731   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r1
   3732   Register key = VectorStoreICDescriptor::NameRegister();           // r2
   3733   Register vector = VectorStoreICDescriptor::VectorRegister();      // r3
   3734   Register slot = VectorStoreICDescriptor::SlotRegister();          // r4
   3735   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0));          // r0
   3736   Register feedback = r5;
   3737   Register receiver_map = r6;
   3738   Register scratch1 = r9;
   3739 
   3740   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   3741   __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   3742 
   3743   // Try to quickly handle the monomorphic case without knowing for sure
   3744   // if we have a weak cell in feedback. We do know it's safe to look
   3745   // at WeakCell::kValueOffset.
   3746   Label try_array, load_smi_map, compare_map;
   3747   Label not_array, miss;
   3748   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3749                         scratch1, &compare_map, &load_smi_map, &try_array);
   3750 
   3751   // Is it a fixed array?
   3752   __ bind(&try_array);
   3753   __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3754   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   3755   __ b(ne, &not_array);
   3756 
   3757   // We are using register r8, which is used for the embedded constant pool
   3758   // when FLAG_enable_embedded_constant_pool is true.
   3759   DCHECK(!FLAG_enable_embedded_constant_pool);
   3760   Register scratch2 = r8;
   3761   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
   3762                    &miss);
   3763 
   3764   __ bind(&not_array);
   3765   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   3766   __ b(ne, &miss);
   3767   Code::Flags code_flags =
   3768       Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   3769   masm->isolate()->stub_cache()->GenerateProbe(
   3770       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
   3771       scratch1, scratch2);
   3772 
   3773   __ bind(&miss);
   3774   StoreIC::GenerateMiss(masm);
   3775 
   3776   __ bind(&load_smi_map);
   3777   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3778   __ jmp(&compare_map);
   3779 }
   3780 
   3781 
   3782 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
   3783   GenerateImpl(masm, false);
   3784 }
   3785 
   3786 
   3787 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   3788   GenerateImpl(masm, true);
   3789 }
   3790 
   3791 
   3792 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
   3793                                        Register receiver_map, Register scratch1,
   3794                                        Register scratch2, Label* miss) {
   3795   // feedback initially contains the feedback array
   3796   Label next_loop, prepare_next;
   3797   Label start_polymorphic;
   3798   Label transition_call;
   3799 
   3800   Register cached_map = scratch1;
   3801   Register too_far = scratch2;
   3802   Register pointer_reg = feedback;
   3803   __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   3804 
   3805   // +-----+------+------+-----+-----+-----+ ... ----+
   3806   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
   3807   // +-----+------+------+-----+-----+-----+ ... ----+
   3808   //                 0      1     2              len-1
   3809   //                 ^                                 ^
   3810   //                 |                                 |
   3811   //             pointer_reg                        too_far
   3812   //             aka feedback                       scratch2
   3813   // also need receiver_map
   3814   // use cached_map (scratch1) to look in the weak map values.
   3815   __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
   3816   __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   3817   __ add(pointer_reg, feedback,
   3818          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
   3819 
   3820   __ bind(&next_loop);
   3821   __ ldr(cached_map, MemOperand(pointer_reg));
   3822   __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   3823   __ cmp(receiver_map, cached_map);
   3824   __ b(ne, &prepare_next);
   3825   // Is it a transitioning store?
   3826   __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
   3827   __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
   3828   __ b(ne, &transition_call);
   3829   __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
   3830   __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
   3831 
   3832   __ bind(&transition_call);
   3833   __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
   3834   __ JumpIfSmi(too_far, miss);
   3835 
   3836   __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
   3837 
   3838   // Load the map into the correct register.
   3839   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
   3840   __ mov(feedback, too_far);
   3841 
   3842   __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
   3843 
   3844   __ bind(&prepare_next);
   3845   __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
   3846   __ cmp(pointer_reg, too_far);
   3847   __ b(lt, &next_loop);
   3848 
   3849   // We exhausted our array of map handler pairs.
   3850   __ jmp(miss);
   3851 }
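
        // The keyed-store feedback walked above uses triplets of
        // (weak map, transition weak cell or undefined, handler), stepping
        // 3 * kPointerSize per entry. C-style sketch (illustrative only):
        //
        //   for (int i = 0; i < length; i += 3) {
        //     if (WeakCell::cast(elements[i])->value() != receiver_map) continue;
        //     Object* transition = elements[i + 1];
        //     if (transition == undefined) return tail_call(elements[i + 2]);
        //     Object* new_map = WeakCell::cast(transition)->value();
        //     if (new_map->IsSmi()) goto miss;           // weak cell was cleared
        //     return tail_call_with_map(elements[i + 2], new_map);  // transitioning
        //   }
        //   goto miss;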
   3852 
   3853 
   3854 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   3855   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r1
   3856   Register key = VectorStoreICDescriptor::NameRegister();           // r2
   3857   Register vector = VectorStoreICDescriptor::VectorRegister();      // r3
   3858   Register slot = VectorStoreICDescriptor::SlotRegister();          // r4
   3859   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0));          // r0
   3860   Register feedback = r5;
   3861   Register receiver_map = r6;
   3862   Register scratch1 = r9;
   3863 
   3864   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   3865   __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   3866 
   3867   // Try to quickly handle the monomorphic case without knowing for sure
   3868   // if we have a weak cell in feedback. We do know it's safe to look
   3869   // at WeakCell::kValueOffset.
   3870   Label try_array, load_smi_map, compare_map;
   3871   Label not_array, miss;
   3872   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   3873                         scratch1, &compare_map, &load_smi_map, &try_array);
   3874 
   3875   __ bind(&try_array);
   3876   // Is it a fixed array?
   3877   __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   3878   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   3879   __ b(ne, &not_array);
   3880 
   3881   // We have a polymorphic element handler.
   3882   Label polymorphic, try_poly_name;
   3883   __ bind(&polymorphic);
   3884 
   3885   // We are using register r8, which is used for the embedded constant pool
   3886   // when FLAG_enable_embedded_constant_pool is true.
   3887   DCHECK(!FLAG_enable_embedded_constant_pool);
   3888   Register scratch2 = r8;
   3889 
   3890   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
   3891                              &miss);
   3892 
   3893   __ bind(&not_array);
   3894   // Is it generic?
   3895   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   3896   __ b(ne, &try_poly_name);
   3897   Handle<Code> megamorphic_stub =
   3898       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   3899   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   3900 
   3901   __ bind(&try_poly_name);
   3902   // We might have a name in feedback, and a fixed array in the next slot.
   3903   __ cmp(key, feedback);
   3904   __ b(ne, &miss);
   3905   // If the name comparison succeeded, we know we have a fixed array with
   3906   // at least one map/handler pair.
   3907   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   3908   __ ldr(feedback,
   3909          FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   3910   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
   3911                    &miss);
   3912 
   3913   __ bind(&miss);
   3914   KeyedStoreIC::GenerateMiss(masm);
   3915 
   3916   __ bind(&load_smi_map);
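           // For Smi receivers the feedback is keyed on the heap number map, so
           // load it here and reuse the map comparison above.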
   3917   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   3918   __ jmp(&compare_map);
   3919 }
   3920 
   3921 
   3922 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   3923   if (masm->isolate()->function_entry_hook() != NULL) {
   3924     ProfileEntryHookStub stub(masm->isolate());
   3925     PredictableCodeSizeScope predictable(masm);
   3926     predictable.ExpectSize(masm->CallStubSize(&stub) +
   3927                            2 * Assembler::kInstrSize);
   3928     __ push(lr);
   3929     __ CallStub(&stub);
   3930     __ pop(lr);
   3931   }
   3932 }
   3933 
   3934 
   3935 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   3936   // The entry hook is a "push lr" instruction, followed by a call.
   3937   const int32_t kReturnAddressDistanceFromFunctionStart =
   3938       3 * Assembler::kInstrSize;
   3939 
   3940   // This should contain all kCallerSaved registers.
   3941   const RegList kSavedRegs =
   3942       1 <<  0 |  // r0
   3943       1 <<  1 |  // r1
   3944       1 <<  2 |  // r2
   3945       1 <<  3 |  // r3
   3946       1 <<  5 |  // r5
   3947       1 <<  9;   // r9
   3948   // We also save lr, so the count here is one higher than the mask indicates.
   3949   const int32_t kNumSavedRegs = 7;
   3950 
   3951   DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
   3952 
   3953   // Save all caller-save registers as this may be called from anywhere.
   3954   __ stm(db_w, sp, kSavedRegs | lr.bit());
   3955 
   3956   // Compute the function's address for the first argument.
   3957   __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
   3958 
   3959   // The caller's return address is above the saved temporaries.
   3960   // Grab that for the second argument to the hook.
   3961   __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
   3962 
   3963   // Align the stack if necessary.
   3964   int frame_alignment = masm->ActivationFrameAlignment();
   3965   if (frame_alignment > kPointerSize) {
   3966     __ mov(r5, sp);
   3967     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   3968     __ and_(sp, sp, Operand(-frame_alignment));
   3969   }
   3970 
   3971 #if V8_HOST_ARCH_ARM
   3972   int32_t entry_hook =
   3973       reinterpret_cast<int32_t>(isolate()->function_entry_hook());
   3974   __ mov(ip, Operand(entry_hook));
   3975 #else
   3976   // Under the simulator we need to indirect the entry hook through a
   3977   // trampoline function at a known address.
    3978   // The trampoline additionally takes the isolate as a third parameter.
   3979   __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
   3980 
   3981   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   3982   __ mov(ip, Operand(ExternalReference(&dispatcher,
   3983                                        ExternalReference::BUILTIN_CALL,
   3984                                        isolate())));
   3985 #endif
   3986   __ Call(ip);
   3987 
   3988   // Restore the stack pointer if needed.
   3989   if (frame_alignment > kPointerSize) {
   3990     __ mov(sp, r5);
   3991   }
   3992 
   3993   // Also pop pc to get Ret(0).
   3994   __ ldm(ia_w, sp, kSavedRegs | pc.bit());
   3995 }
   3996 
   3997 
   3998 template<class T>
   3999 static void CreateArrayDispatch(MacroAssembler* masm,
   4000                                 AllocationSiteOverrideMode mode) {
   4001   if (mode == DISABLE_ALLOCATION_SITES) {
   4002     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
   4003     __ TailCallStub(&stub);
   4004   } else if (mode == DONT_OVERRIDE) {
   4005     int last_index = GetSequenceIndexFromFastElementsKind(
   4006         TERMINAL_FAST_ELEMENTS_KIND);
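             // Probe each fast ElementsKind in sequence order; the eq-conditional
             // tail call below only fires for the kind that matches r3.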
   4007     for (int i = 0; i <= last_index; ++i) {
   4008       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4009       __ cmp(r3, Operand(kind));
   4010       T stub(masm->isolate(), kind);
   4011       __ TailCallStub(&stub, eq);
   4012     }
   4013 
   4014     // If we reached this point there is a problem.
   4015     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4016   } else {
   4017     UNREACHABLE();
   4018   }
   4019 }
   4020 
   4021 
   4022 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   4023                                            AllocationSiteOverrideMode mode) {
   4024   // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
   4025   // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
   4026   // r0 - number of arguments
    4027   // r1 - constructor
   4028   // sp[0] - last argument
   4029   Label normal_sequence;
   4030   if (mode == DONT_OVERRIDE) {
   4031     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   4032     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   4033     STATIC_ASSERT(FAST_ELEMENTS == 2);
   4034     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   4035     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
   4036     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
   4037 
    4038     // Is the low bit set? If so, we are already holey and that is good.
   4039     __ tst(r3, Operand(1));
   4040     __ b(ne, &normal_sequence);
   4041   }
   4042 
    4043   // Look at the first argument.
   4044   __ ldr(r5, MemOperand(sp, 0));
   4045   __ cmp(r5, Operand::Zero());
   4046   __ b(eq, &normal_sequence);
   4047 
   4048   if (mode == DISABLE_ALLOCATION_SITES) {
   4049     ElementsKind initial = GetInitialFastElementsKind();
   4050     ElementsKind holey_initial = GetHoleyElementsKind(initial);
   4051 
   4052     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
   4053                                                   holey_initial,
   4054                                                   DISABLE_ALLOCATION_SITES);
   4055     __ TailCallStub(&stub_holey);
   4056 
   4057     __ bind(&normal_sequence);
   4058     ArraySingleArgumentConstructorStub stub(masm->isolate(),
   4059                                             initial,
   4060                                             DISABLE_ALLOCATION_SITES);
   4061     __ TailCallStub(&stub);
   4062   } else if (mode == DONT_OVERRIDE) {
   4063     // We are going to create a holey array, but our kind is non-holey.
   4064     // Fix kind and retry (only if we have an allocation site in the slot).
   4065     __ add(r3, r3, Operand(1));
   4066 
   4067     if (FLAG_debug_code) {
   4068       __ ldr(r5, FieldMemOperand(r2, 0));
   4069       __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
   4070       __ Assert(eq, kExpectedAllocationSite);
   4071     }
   4072 
   4073     // Save the resulting elements kind in type info. We can't just store r3
   4074     // in the AllocationSite::transition_info field because elements kind is
    4075     // restricted to a portion of the field; upper bits must be left alone.
   4076     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
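             // Packed kinds are even and their holey counterparts are the next odd
             // value (see the STATIC_ASSERTs above), so this Smi addition cannot
             // carry into the upper bits of the transition info.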
   4077     __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
   4078     __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
   4079     __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
   4080 
   4081     __ bind(&normal_sequence);
   4082     int last_index = GetSequenceIndexFromFastElementsKind(
   4083         TERMINAL_FAST_ELEMENTS_KIND);
   4084     for (int i = 0; i <= last_index; ++i) {
   4085       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4086       __ cmp(r3, Operand(kind));
   4087       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
   4088       __ TailCallStub(&stub, eq);
   4089     }
   4090 
   4091     // If we reached this point there is a problem.
   4092     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4093   } else {
   4094     UNREACHABLE();
   4095   }
   4096 }
   4097 
   4098 
   4099 template<class T>
   4100 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
   4101   int to_index = GetSequenceIndexFromFastElementsKind(
   4102       TERMINAL_FAST_ELEMENTS_KIND);
   4103   for (int i = 0; i <= to_index; ++i) {
   4104     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4105     T stub(isolate, kind);
   4106     stub.GetCode();
   4107     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
   4108       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
   4109       stub1.GetCode();
   4110     }
   4111   }
   4112 }
   4113 
   4114 void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   4115   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
   4116       isolate);
   4117   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
   4118       isolate);
   4119   ArrayNArgumentsConstructorStub stub(isolate);
   4120   stub.GetCode();
   4121   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   4122   for (int i = 0; i < 2; i++) {
    4123     // For internal arrays we only need the 0- and 1-argument stub variants.
   4124     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
   4125     stubh1.GetCode();
   4126     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
   4127     stubh2.GetCode();
   4128   }
   4129 }
   4130 
   4131 
   4132 void ArrayConstructorStub::GenerateDispatchToArrayStub(
   4133     MacroAssembler* masm,
   4134     AllocationSiteOverrideMode mode) {
   4135   if (argument_count() == ANY) {
   4136     Label not_zero_case, not_one_case;
   4137     __ tst(r0, r0);
   4138     __ b(ne, &not_zero_case);
   4139     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   4140 
   4141     __ bind(&not_zero_case);
   4142     __ cmp(r0, Operand(1));
   4143     __ b(gt, &not_one_case);
   4144     CreateArrayDispatchOneArgument(masm, mode);
   4145 
   4146     __ bind(&not_one_case);
   4147     ArrayNArgumentsConstructorStub stub(masm->isolate());
   4148     __ TailCallStub(&stub);
   4149   } else if (argument_count() == NONE) {
   4150     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   4151   } else if (argument_count() == ONE) {
   4152     CreateArrayDispatchOneArgument(masm, mode);
   4153   } else if (argument_count() == MORE_THAN_ONE) {
   4154     ArrayNArgumentsConstructorStub stub(masm->isolate());
   4155     __ TailCallStub(&stub);
   4156   } else {
   4157     UNREACHABLE();
   4158   }
   4159 }
   4160 
   4161 
   4162 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   4163   // ----------- S t a t e -------------
   4164   //  -- r0 : argc (only if argument_count() == ANY)
   4165   //  -- r1 : constructor
   4166   //  -- r2 : AllocationSite or undefined
   4167   //  -- r3 : new target
   4168   //  -- sp[0] : return address
   4169   //  -- sp[4] : last argument
   4170   // -----------------------------------
   4171 
   4172   if (FLAG_debug_code) {
   4173     // The array construct code is only set for the global and natives
   4174     // builtin Array functions which always have maps.
   4175 
   4176     // Initial map for the builtin Array function should be a map.
   4177     __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    4178     // The following Smi check catches both a NULL pointer and a Smi.
   4179     __ tst(r4, Operand(kSmiTagMask));
   4180     __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
   4181     __ CompareObjectType(r4, r4, r5, MAP_TYPE);
   4182     __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
   4183 
   4184     // We should either have undefined in r2 or a valid AllocationSite
   4185     __ AssertUndefinedOrAllocationSite(r2, r4);
   4186   }
   4187 
   4188   // Enter the context of the Array function.
   4189   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   4190 
   4191   Label subclassing;
   4192   __ cmp(r3, r1);
   4193   __ b(ne, &subclassing);
   4194 
   4195   Label no_info;
    4196   // Get the elements kind and dispatch on it.
   4197   __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
   4198   __ b(eq, &no_info);
   4199 
   4200   __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
   4201   __ SmiUntag(r3);
   4202   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   4203   __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
   4204   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
   4205 
   4206   __ bind(&no_info);
   4207   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
   4208 
   4209   __ bind(&subclassing);
   4210   switch (argument_count()) {
   4211     case ANY:
   4212     case MORE_THAN_ONE:
   4213       __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
   4214       __ add(r0, r0, Operand(3));
   4215       break;
   4216     case NONE:
   4217       __ str(r1, MemOperand(sp, 0 * kPointerSize));
   4218       __ mov(r0, Operand(3));
   4219       break;
   4220     case ONE:
   4221       __ str(r1, MemOperand(sp, 1 * kPointerSize));
   4222       __ mov(r0, Operand(4));
   4223       break;
   4224   }
   4225   __ Push(r3, r2);
   4226   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
   4227 }
   4228 
   4229 
   4230 void InternalArrayConstructorStub::GenerateCase(
   4231     MacroAssembler* masm, ElementsKind kind) {
   4232   __ cmp(r0, Operand(1));
   4233 
   4234   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   4235   __ TailCallStub(&stub0, lo);
   4236 
   4237   ArrayNArgumentsConstructorStub stubN(isolate());
   4238   __ TailCallStub(&stubN, hi);
   4239 
   4240   if (IsFastPackedElementsKind(kind)) {
    4241     // We might need to create a holey array;
    4242     // look at the first argument to decide.
   4243     __ ldr(r3, MemOperand(sp, 0));
   4244     __ cmp(r3, Operand::Zero());
   4245 
   4246     InternalArraySingleArgumentConstructorStub
   4247         stub1_holey(isolate(), GetHoleyElementsKind(kind));
   4248     __ TailCallStub(&stub1_holey, ne);
   4249   }
   4250 
   4251   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
   4252   __ TailCallStub(&stub1);
   4253 }
   4254 
   4255 
   4256 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   4257   // ----------- S t a t e -------------
   4258   //  -- r0 : argc
   4259   //  -- r1 : constructor
   4260   //  -- sp[0] : return address
   4261   //  -- sp[4] : last argument
   4262   // -----------------------------------
   4263 
   4264   if (FLAG_debug_code) {
   4265     // The array construct code is only set for the global and natives
   4266     // builtin Array functions which always have maps.
   4267 
   4268     // Initial map for the builtin Array function should be a map.
   4269     __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    4270     // The following Smi check catches both a NULL pointer and a Smi.
   4271     __ tst(r3, Operand(kSmiTagMask));
   4272     __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
   4273     __ CompareObjectType(r3, r3, r4, MAP_TYPE);
   4274     __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
   4275   }
   4276 
   4277   // Figure out the right elements kind
   4278   __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    4279   // Load the map's "bit field 2" into r3. We only need the first byte,
   4280   // but the following bit field extraction takes care of that anyway.
   4281   __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
   4282   // Retrieve elements_kind from bit field 2.
   4283   __ DecodeField<Map::ElementsKindBits>(r3);
   4284 
   4285   if (FLAG_debug_code) {
   4286     Label done;
   4287     __ cmp(r3, Operand(FAST_ELEMENTS));
   4288     __ b(eq, &done);
   4289     __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
   4290     __ Assert(eq,
   4291               kInvalidElementsKindForInternalArrayOrInternalPackedArray);
   4292     __ bind(&done);
   4293   }
   4294 
   4295   Label fast_elements_case;
   4296   __ cmp(r3, Operand(FAST_ELEMENTS));
   4297   __ b(eq, &fast_elements_case);
   4298   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
   4299 
   4300   __ bind(&fast_elements_case);
   4301   GenerateCase(masm, FAST_ELEMENTS);
   4302 }
   4303 
   4304 
   4305 void FastNewObjectStub::Generate(MacroAssembler* masm) {
   4306   // ----------- S t a t e -------------
   4307   //  -- r1 : target
   4308   //  -- r3 : new target
   4309   //  -- cp : context
   4310   //  -- lr : return address
   4311   // -----------------------------------
   4312   __ AssertFunction(r1);
   4313   __ AssertReceiver(r3);
   4314 
   4315   // Verify that the new target is a JSFunction.
   4316   Label new_object;
   4317   __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
   4318   __ b(ne, &new_object);
   4319 
   4320   // Load the initial map and verify that it's in fact a map.
   4321   __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
   4322   __ JumpIfSmi(r2, &new_object);
   4323   __ CompareObjectType(r2, r0, r0, MAP_TYPE);
   4324   __ b(ne, &new_object);
   4325 
   4326   // Fall back to runtime if the target differs from the new target's
   4327   // initial map constructor.
   4328   __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
   4329   __ cmp(r0, r1);
   4330   __ b(ne, &new_object);
   4331 
   4332   // Allocate the JSObject on the heap.
   4333   Label allocate, done_allocate;
   4334   __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
   4335   __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
   4336   __ bind(&done_allocate);
   4337 
   4338   // Initialize the JSObject fields.
   4339   __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
   4340   __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
   4341   __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   4342   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   4343   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
   4344   __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
   4345 
   4346   // ----------- S t a t e -------------
   4347   //  -- r0 : result (tagged)
   4348   //  -- r1 : result fields (untagged)
   4349   //  -- r5 : result end (untagged)
   4350   //  -- r2 : initial map
   4351   //  -- cp : context
   4352   //  -- lr : return address
   4353   // -----------------------------------
   4354 
   4355   // Perform in-object slack tracking if requested.
   4356   Label slack_tracking;
   4357   STATIC_ASSERT(Map::kNoSlackTracking == 0);
   4358   __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
   4359   __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
   4360   __ tst(r3, Operand(Map::ConstructionCounter::kMask));
   4361   __ b(ne, &slack_tracking);
   4362   {
   4363     // Initialize all in-object fields with undefined.
   4364     __ InitializeFieldsWithFiller(r1, r5, r6);
   4365     __ Ret();
   4366   }
   4367   __ bind(&slack_tracking);
   4368   {
   4369     // Decrease generous allocation count.
   4370     STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
   4371     __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
   4372     __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
   4373 
   4374     // Initialize the in-object fields with undefined.
   4375     __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
   4376     __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
   4377     __ InitializeFieldsWithFiller(r1, r4, r6);
   4378 
   4379     // Initialize the remaining (reserved) fields with one pointer filler map.
   4380     __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
   4381     __ InitializeFieldsWithFiller(r1, r5, r6);
   4382 
   4383     // Check if we can finalize the instance size.
   4384     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
   4385     __ tst(r3, Operand(Map::ConstructionCounter::kMask));
   4386     __ Ret(ne);
   4387 
   4388     // Finalize the instance size.
   4389     {
   4390       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   4391       __ Push(r0, r2);
   4392       __ CallRuntime(Runtime::kFinalizeInstanceSize);
   4393       __ Pop(r0);
   4394     }
   4395     __ Ret();
   4396   }
   4397 
   4398   // Fall back to %AllocateInNewSpace.
   4399   __ bind(&allocate);
   4400   {
   4401     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   4402     STATIC_ASSERT(kSmiTag == 0);
   4403     STATIC_ASSERT(kSmiTagSize == 1);
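             // r4 holds the instance size in words; shifting by kPointerSizeLog2 + 1
             // converts it to a byte count and Smi-tags it in one step.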
   4404     __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
   4405     __ Push(r2, r4);
   4406     __ CallRuntime(Runtime::kAllocateInNewSpace);
   4407     __ Pop(r2);
   4408   }
   4409   __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
   4410   __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
   4411   STATIC_ASSERT(kHeapObjectTag == 1);
   4412   __ sub(r5, r5, Operand(kHeapObjectTag));
   4413   __ b(&done_allocate);
   4414 
   4415   // Fall back to %NewObject.
   4416   __ bind(&new_object);
   4417   __ Push(r1, r3);
   4418   __ TailCallRuntime(Runtime::kNewObject);
   4419 }
   4420 
   4421 
   4422 void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
   4423   // ----------- S t a t e -------------
   4424   //  -- r1 : function
   4425   //  -- cp : context
   4426   //  -- fp : frame pointer
   4427   //  -- lr : return address
   4428   // -----------------------------------
   4429   __ AssertFunction(r1);
   4430 
   4431   // Make r2 point to the JavaScript frame.
   4432   __ mov(r2, fp);
   4433   if (skip_stub_frame()) {
   4434     // For Ignition we need to skip the handler/stub frame to reach the
   4435     // JavaScript frame for the function.
   4436     __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
   4437   }
   4438   if (FLAG_debug_code) {
   4439     Label ok;
   4440     __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
   4441     __ cmp(ip, r1);
   4442     __ b(eq, &ok);
   4443     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
   4444     __ bind(&ok);
   4445   }
   4446 
   4447   // Check if we have rest parameters (only possible if we have an
   4448   // arguments adaptor frame below the function frame).
   4449   Label no_rest_parameters;
   4450   __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
   4451   __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
   4452   __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   4453   __ b(ne, &no_rest_parameters);
   4454 
   4455   // Check if the arguments adaptor frame contains more arguments than
   4456   // specified by the function's internal formal parameter count.
   4457   Label rest_parameters;
   4458   __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
   4459   __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   4460   __ ldr(r3,
   4461          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
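           // r0 holds the actual argument count and r3 the formal parameter count,
           // both tagged; their difference, when positive, is the number of rest
           // parameters.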
   4462   __ sub(r0, r0, r3, SetCC);
   4463   __ b(gt, &rest_parameters);
   4464 
   4465   // Return an empty rest parameter array.
   4466   __ bind(&no_rest_parameters);
   4467   {
   4468     // ----------- S t a t e -------------
   4469     //  -- cp : context
   4470     //  -- lr : return address
   4471     // -----------------------------------
   4472 
   4473     // Allocate an empty rest parameter array.
   4474     Label allocate, done_allocate;
   4475     __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
   4476     __ bind(&done_allocate);
   4477 
   4478     // Setup the rest parameter array in r0.
   4479     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
   4480     __ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
   4481     __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
   4482     __ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
   4483     __ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
   4484     __ mov(r1, Operand(0));
   4485     __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
   4486     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
   4487     __ Ret();
   4488 
   4489     // Fall back to %AllocateInNewSpace.
   4490     __ bind(&allocate);
   4491     {
   4492       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   4493       __ Push(Smi::FromInt(JSArray::kSize));
   4494       __ CallRuntime(Runtime::kAllocateInNewSpace);
   4495     }
   4496     __ jmp(&done_allocate);
   4497   }
   4498 
   4499   __ bind(&rest_parameters);
   4500   {
    4501     // Compute the pointer to the first rest parameter (skipping the receiver).
   4502     __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4503     __ add(r2, r2,
   4504            Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
   4505 
   4506     // ----------- S t a t e -------------
   4507     //  -- cp : context
   4508     //  -- r0 : number of rest parameters (tagged)
   4509     //  -- r1 : function
   4510     //  -- r2 : pointer to first rest parameters
   4511     //  -- lr : return address
   4512     // -----------------------------------
   4513 
   4514     // Allocate space for the rest parameter array plus the backing store.
   4515     Label allocate, done_allocate;
   4516     __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
   4517     __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4518     __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
   4519     __ bind(&done_allocate);
   4520 
   4521     // Setup the elements array in r3.
   4522     __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
   4523     __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
   4524     __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
   4525     __ add(r4, r3, Operand(FixedArray::kHeaderSize));
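             // Copy the rest parameters into the backing store: r2 walks down the
             // caller's stack (NegPostIndex) while r4 fills the FixedArray upwards.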
   4526     {
   4527       Label loop, done_loop;
   4528       __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4529       __ bind(&loop);
   4530       __ cmp(r4, r1);
   4531       __ b(eq, &done_loop);
   4532       __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
   4533       __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
   4534       __ add(r4, r4, Operand(1 * kPointerSize));
   4535       __ b(&loop);
   4536       __ bind(&done_loop);
   4537     }
   4538 
   4539     // Setup the rest parameter array in r4.
   4540     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
   4541     __ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
   4542     __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
   4543     __ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
   4544     __ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
   4545     __ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
   4546     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
   4547     __ mov(r0, r4);
   4548     __ Ret();
   4549 
   4550     // Fall back to %AllocateInNewSpace (if not too big).
   4551     Label too_big_for_new_space;
   4552     __ bind(&allocate);
   4553     __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
   4554     __ b(gt, &too_big_for_new_space);
   4555     {
   4556       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   4557       __ SmiTag(r6);
   4558       __ Push(r0, r2, r6);
   4559       __ CallRuntime(Runtime::kAllocateInNewSpace);
   4560       __ mov(r3, r0);
   4561       __ Pop(r0, r2);
   4562     }
   4563     __ jmp(&done_allocate);
   4564 
   4565     // Fall back to %NewRestParameter.
   4566     __ bind(&too_big_for_new_space);
   4567     __ push(r1);
   4568     __ TailCallRuntime(Runtime::kNewRestParameter);
   4569   }
   4570 }
   4571 
   4572 
   4573 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
   4574   // ----------- S t a t e -------------
   4575   //  -- r1 : function
   4576   //  -- cp : context
   4577   //  -- fp : frame pointer
   4578   //  -- lr : return address
   4579   // -----------------------------------
   4580   __ AssertFunction(r1);
   4581 
   4582   // Make r9 point to the JavaScript frame.
   4583   __ mov(r9, fp);
   4584   if (skip_stub_frame()) {
   4585     // For Ignition we need to skip the handler/stub frame to reach the
   4586     // JavaScript frame for the function.
   4587     __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
   4588   }
   4589   if (FLAG_debug_code) {
   4590     Label ok;
   4591     __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
   4592     __ cmp(ip, r1);
   4593     __ b(eq, &ok);
   4594     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
   4595     __ bind(&ok);
   4596   }
   4597 
   4598   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   4599   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   4600   __ ldr(r2,
   4601          FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
   4602   __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
   4603   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
   4604 
   4605   // r1 : function
   4606   // r2 : number of parameters (tagged)
   4607   // r3 : parameters pointer
   4608   // r9 : JavaScript frame pointer
   4609   // Registers used over whole function:
   4610   //  r5 : arguments count (tagged)
   4611   //  r6 : mapped parameter count (tagged)
   4612 
   4613   // Check if the calling frame is an arguments adaptor frame.
   4614   Label adaptor_frame, try_allocate, runtime;
   4615   __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
   4616   __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
   4617   __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   4618   __ b(eq, &adaptor_frame);
   4619 
   4620   // No adaptor, parameter count = argument count.
   4621   __ mov(r5, r2);
   4622   __ mov(r6, r2);
   4623   __ b(&try_allocate);
   4624 
   4625   // We have an adaptor frame. Patch the parameters pointer.
   4626   __ bind(&adaptor_frame);
   4627   __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
   4628   __ add(r4, r4, Operand(r5, LSL, 1));
   4629   __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
   4630 
   4631   // r5 = argument count (tagged)
   4632   // r6 = parameter count (tagged)
   4633   // Compute the mapped parameter count = min(r6, r5) in r6.
   4634   __ mov(r6, r2);
   4635   __ cmp(r6, Operand(r5));
   4636   __ mov(r6, Operand(r5), LeaveCC, gt);
   4637 
   4638   __ bind(&try_allocate);
   4639 
   4640   // Compute the sizes of backing store, parameter map, and arguments object.
   4641   // 1. Parameter map, has 2 extra words containing context and backing store.
   4642   const int kParameterMapHeaderSize =
   4643       FixedArray::kHeaderSize + 2 * kPointerSize;
   4644   // If there are no mapped parameters, we do not need the parameter_map.
   4645   __ cmp(r6, Operand(Smi::FromInt(0)));
   4646   __ mov(r9, Operand::Zero(), LeaveCC, eq);
   4647   __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
   4648   __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
   4649 
   4650   // 2. Backing store.
   4651   __ add(r9, r9, Operand(r5, LSL, 1));
   4652   __ add(r9, r9, Operand(FixedArray::kHeaderSize));
   4653 
   4654   // 3. Arguments object.
   4655   __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
   4656 
   4657   // Do the allocation of all three objects in one go.
   4658   __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
   4659 
   4660   // r0 = address of new object(s) (tagged)
   4661   // r2 = argument count (smi-tagged)
   4662   // Get the arguments boilerplate from the current native context into r4.
   4663   const int kNormalOffset =
   4664       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   4665   const int kAliasedOffset =
   4666       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
   4667 
   4668   __ ldr(r4, NativeContextMemOperand());
   4669   __ cmp(r6, Operand::Zero());
   4670   __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
   4671   __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
   4672 
   4673   // r0 = address of new object (tagged)
   4674   // r2 = argument count (smi-tagged)
   4675   // r4 = address of arguments map (tagged)
   4676   // r6 = mapped parameter count (tagged)
   4677   __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
   4678   __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
   4679   __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   4680   __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
   4681 
   4682   // Set up the callee in-object property.
   4683   __ AssertNotSmi(r1);
   4684   __ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));
   4685 
   4686   // Use the length (smi tagged) and set that as an in-object property too.
   4687   __ AssertSmi(r5);
   4688   __ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
   4689 
   4690   // Set up the elements pointer in the allocated arguments object.
   4691   // If we allocated a parameter map, r4 will point there, otherwise
   4692   // it will point to the backing store.
   4693   __ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
   4694   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
   4695 
   4696   // r0 = address of new object (tagged)
   4697   // r2 = argument count (tagged)
   4698   // r4 = address of parameter map or backing store (tagged)
   4699   // r6 = mapped parameter count (tagged)
   4700   // Initialize parameter map. If there are no mapped arguments, we're done.
   4701   Label skip_parameter_map;
   4702   __ cmp(r6, Operand(Smi::FromInt(0)));
   4703   // Move backing store address to r1, because it is
   4704   // expected there when filling in the unmapped arguments.
   4705   __ mov(r1, r4, LeaveCC, eq);
   4706   __ b(eq, &skip_parameter_map);
   4707 
   4708   __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
   4709   __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
   4710   __ add(r5, r6, Operand(Smi::FromInt(2)));
   4711   __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
   4712   __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
   4713   __ add(r5, r4, Operand(r6, LSL, 1));
   4714   __ add(r5, r5, Operand(kParameterMapHeaderSize));
   4715   __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
   4716 
   4717   // Copy the parameter slots and the holes in the arguments.
   4718   // We need to fill in mapped_parameter_count slots. They index the context,
   4719   // where parameters are stored in reverse order, at
   4720   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
    4721   // The mapped parameters thus need to get indices
   4722   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
   4723   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
   4724   // We loop from right to left.
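           // For example, with parameter_count == 3 and mapped_parameter_count == 2,
           // the mapped slots get context indices MIN_CONTEXT_SLOTS + 2 and
           // MIN_CONTEXT_SLOTS + 1.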
   4725   Label parameters_loop, parameters_test;
   4726   __ mov(r5, r6);
   4727   __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   4728   __ sub(r9, r9, Operand(r6));
   4729   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   4730   __ add(r1, r4, Operand(r5, LSL, 1));
   4731   __ add(r1, r1, Operand(kParameterMapHeaderSize));
   4732 
   4733   // r1 = address of backing store (tagged)
   4734   // r4 = address of parameter map (tagged), which is also the address of new
    4735   //      object + JSSloppyArgumentsObject::kSize (tagged)
    4736   // r0 = temporary scratch (among other things, for address calculation)
   4737   // r5 = loop variable (tagged)
   4738   // ip = the hole value
   4739   __ jmp(&parameters_test);
   4740 
   4741   __ bind(&parameters_loop);
   4742   __ sub(r5, r5, Operand(Smi::FromInt(1)));
   4743   __ mov(r0, Operand(r5, LSL, 1));
   4744   __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
   4745   __ str(r9, MemOperand(r4, r0));
   4746   __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
   4747   __ str(ip, MemOperand(r1, r0));
   4748   __ add(r9, r9, Operand(Smi::FromInt(1)));
   4749   __ bind(&parameters_test);
   4750   __ cmp(r5, Operand(Smi::FromInt(0)));
   4751   __ b(ne, &parameters_loop);
   4752 
   4753   // Restore r0 = new object (tagged) and r5 = argument count (tagged).
   4754   __ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
   4755   __ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
   4756 
   4757   __ bind(&skip_parameter_map);
   4758   // r0 = address of new object (tagged)
   4759   // r1 = address of backing store (tagged)
   4760   // r5 = argument count (tagged)
   4761   // r6 = mapped parameter count (tagged)
   4762   // r9 = scratch
   4763   // Copy arguments header and remaining slots (if there are any).
   4764   __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
   4765   __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
   4766   __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
   4767 
   4768   Label arguments_loop, arguments_test;
   4769   __ sub(r3, r3, Operand(r6, LSL, 1));
   4770   __ jmp(&arguments_test);
   4771 
   4772   __ bind(&arguments_loop);
   4773   __ sub(r3, r3, Operand(kPointerSize));
   4774   __ ldr(r4, MemOperand(r3, 0));
   4775   __ add(r9, r1, Operand(r6, LSL, 1));
   4776   __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
   4777   __ add(r6, r6, Operand(Smi::FromInt(1)));
   4778 
   4779   __ bind(&arguments_test);
   4780   __ cmp(r6, Operand(r5));
   4781   __ b(lt, &arguments_loop);
   4782 
   4783   // Return.
   4784   __ Ret();
   4785 
   4786   // Do the runtime call to allocate the arguments object.
   4787   // r0 = address of new object (tagged)
   4788   // r5 = argument count (tagged)
   4789   __ bind(&runtime);
   4790   __ Push(r1, r3, r5);
   4791   __ TailCallRuntime(Runtime::kNewSloppyArguments);
   4792 }
   4793 
   4794 
   4795 void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
   4796   // ----------- S t a t e -------------
   4797   //  -- r1 : function
   4798   //  -- cp : context
   4799   //  -- fp : frame pointer
   4800   //  -- lr : return address
   4801   // -----------------------------------
   4802   __ AssertFunction(r1);
   4803 
   4804   // Make r2 point to the JavaScript frame.
   4805   __ mov(r2, fp);
   4806   if (skip_stub_frame()) {
   4807     // For Ignition we need to skip the handler/stub frame to reach the
   4808     // JavaScript frame for the function.
   4809     __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
   4810   }
   4811   if (FLAG_debug_code) {
   4812     Label ok;
   4813     __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
   4814     __ cmp(ip, r1);
   4815     __ b(eq, &ok);
   4816     __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
   4817     __ bind(&ok);
   4818   }
   4819 
   4820   // Check if we have an arguments adaptor frame below the function frame.
   4821   Label arguments_adaptor, arguments_done;
   4822   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
   4823   __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
   4824   __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   4825   __ b(eq, &arguments_adaptor);
   4826   {
   4827     __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   4828     __ ldr(r0, FieldMemOperand(
   4829                    r4, SharedFunctionInfo::kFormalParameterCountOffset));
   4830     __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4831     __ add(r2, r2,
   4832            Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
   4833   }
   4834   __ b(&arguments_done);
   4835   __ bind(&arguments_adaptor);
   4836   {
   4837     __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
   4838     __ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4839     __ add(r2, r2,
   4840            Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
   4841   }
   4842   __ bind(&arguments_done);
   4843 
   4844   // ----------- S t a t e -------------
   4845   //  -- cp : context
   4846   //  -- r0 : number of rest parameters (tagged)
   4847   //  -- r1 : function
   4848   //  -- r2 : pointer to first rest parameters
   4849   //  -- lr : return address
   4850   // -----------------------------------
   4851 
   4852   // Allocate space for the strict arguments object plus the backing store.
   4853   Label allocate, done_allocate;
   4854   __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
   4855   __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4856   __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
   4857   __ bind(&done_allocate);
   4858 
   4859   // Setup the elements array in r3.
   4860   __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
   4861   __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
   4862   __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
   4863   __ add(r4, r3, Operand(FixedArray::kHeaderSize));
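           // Copy the arguments into the backing store, walking down the caller's
           // stack while filling the FixedArray upwards.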
   4864   {
   4865     Label loop, done_loop;
   4866     __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
   4867     __ bind(&loop);
   4868     __ cmp(r4, r1);
   4869     __ b(eq, &done_loop);
   4870     __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
   4871     __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
   4872     __ add(r4, r4, Operand(1 * kPointerSize));
   4873     __ b(&loop);
   4874     __ bind(&done_loop);
   4875   }
   4876 
   4877   // Setup the strict arguments object in r4.
   4878   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
   4879   __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
   4880   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
   4881   __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
   4882   __ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
   4883   __ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
   4884   STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
   4885   __ mov(r0, r4);
   4886   __ Ret();
   4887 
   4888   // Fall back to %AllocateInNewSpace (if not too big).
   4889   Label too_big_for_new_space;
   4890   __ bind(&allocate);
   4891   __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
   4892   __ b(gt, &too_big_for_new_space);
   4893   {
   4894     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   4895     __ SmiTag(r6);
   4896     __ Push(r0, r2, r6);
   4897     __ CallRuntime(Runtime::kAllocateInNewSpace);
   4898     __ mov(r3, r0);
   4899     __ Pop(r0, r2);
   4900   }
   4901   __ b(&done_allocate);
   4902 
   4903   // Fall back to %NewStrictArguments.
   4904   __ bind(&too_big_for_new_space);
   4905   __ push(r1);
   4906   __ TailCallRuntime(Runtime::kNewStrictArguments);
   4907 }
   4908 
   4909 
   4910 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
   4911   Register value = r0;
   4912   Register slot = r2;
   4913 
   4914   Register cell = r1;
   4915   Register cell_details = r4;
   4916   Register cell_value = r5;
   4917   Register cell_value_map = r6;
   4918   Register scratch = r9;
   4919 
   4920   Register context = cp;
   4921   Register context_temp = cell;
   4922 
   4923   Label fast_heapobject_case, fast_smi_case, slow_case;
   4924 
   4925   if (FLAG_debug_code) {
   4926     __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
   4927     __ Check(ne, kUnexpectedValue);
   4928   }
   4929 
   4930   // Go up the context chain to the script context.
   4931   for (int i = 0; i < depth(); i++) {
   4932     __ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
   4933     context = context_temp;
   4934   }
   4935 
   4936   // Load the PropertyCell at the specified slot.
   4937   __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
   4938   __ ldr(cell, ContextMemOperand(cell));
   4939 
   4940   // Load PropertyDetails for the cell (actually only the cell_type and kind).
   4941   __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
   4942   __ SmiUntag(cell_details);
   4943   __ and_(cell_details, cell_details,
   4944           Operand(PropertyDetails::PropertyCellTypeField::kMask |
   4945                   PropertyDetails::KindField::kMask |
   4946                   PropertyDetails::kAttributesReadOnlyMask));
   4947 
   4948   // Check if PropertyCell holds mutable data.
   4949   Label not_mutable_data;
   4950   __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   4951                                    PropertyCellType::kMutable) |
   4952                                PropertyDetails::KindField::encode(kData)));
   4953   __ b(ne, &not_mutable_data);
   4954   __ JumpIfSmi(value, &fast_smi_case);
   4955 
   4956   __ bind(&fast_heapobject_case);
   4957   __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
   4958   // RecordWriteField clobbers the value register, so we copy it before the
   4959   // call.
   4960   __ mov(r4, Operand(value));
   4961   __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
   4962                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
   4963                       OMIT_SMI_CHECK);
   4964   __ Ret();
   4965 
   4966   __ bind(&not_mutable_data);
   4967   // Check if PropertyCell value matches the new value (relevant for Constant,
   4968   // ConstantType and Undefined cells).
   4969   Label not_same_value;
   4970   __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
   4971   __ cmp(cell_value, value);
   4972   __ b(ne, &not_same_value);
   4973 
   4974   // Make sure the PropertyCell is not marked READ_ONLY.
   4975   __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
   4976   __ b(ne, &slow_case);
   4977 
   4978   if (FLAG_debug_code) {
   4979     Label done;
   4980     // This can only be true for Constant, ConstantType and Undefined cells,
   4981     // because we never store the_hole via this stub.
   4982     __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   4983                                      PropertyCellType::kConstant) |
   4984                                  PropertyDetails::KindField::encode(kData)));
   4985     __ b(eq, &done);
   4986     __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   4987                                      PropertyCellType::kConstantType) |
   4988                                  PropertyDetails::KindField::encode(kData)));
   4989     __ b(eq, &done);
   4990     __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   4991                                      PropertyCellType::kUndefined) |
   4992                                  PropertyDetails::KindField::encode(kData)));
   4993     __ Check(eq, kUnexpectedValue);
   4994     __ bind(&done);
   4995   }
   4996   __ Ret();
   4997   __ bind(&not_same_value);
   4998 
   4999   // Check if PropertyCell contains data with constant type (and is not
   5000   // READ_ONLY).
   5001   __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   5002                                    PropertyCellType::kConstantType) |
   5003                                PropertyDetails::KindField::encode(kData)));
   5004   __ b(ne, &slow_case);
   5005 
   5006   // Now either both old and new values must be smis or both must be heap
    5007   // objects with the same map.
   5008   Label value_is_heap_object;
   5009   __ JumpIfNotSmi(value, &value_is_heap_object);
   5010   __ JumpIfNotSmi(cell_value, &slow_case);
   5011   // Old and new values are smis, no need for a write barrier here.
   5012   __ bind(&fast_smi_case);
   5013   __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
   5014   __ Ret();
   5015 
   5016   __ bind(&value_is_heap_object);
   5017   __ JumpIfSmi(cell_value, &slow_case);
   5018 
   5019   __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
   5020   __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
   5021   __ cmp(cell_value_map, scratch);
   5022   __ b(eq, &fast_heapobject_case);
   5023 
   5024   // Fallback to runtime.
   5025   __ bind(&slow_case);
   5026   __ SmiTag(slot);
   5027   __ Push(slot, value);
   5028   __ TailCallRuntime(is_strict(language_mode())
   5029                          ? Runtime::kStoreGlobalViaContext_Strict
   5030                          : Runtime::kStoreGlobalViaContext_Sloppy);
   5031 }
   5032 
   5033 
   5034 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   5035   return ref0.address() - ref1.address();
   5036 }
   5037 
   5038 
    5039 // Calls an API function. Allocates a HandleScope, extracts the returned
    5040 // value from the handle and propagates exceptions. Restores the context.
    5041 // stack_space is the space to be unwound on exit; it includes the call's
    5042 // JS arguments space and the additional space allocated for the fast call.
   5043 static void CallApiFunctionAndReturn(MacroAssembler* masm,
   5044                                      Register function_address,
   5045                                      ExternalReference thunk_ref,
   5046                                      int stack_space,
   5047                                      MemOperand* stack_space_operand,
   5048                                      MemOperand return_value_operand,
   5049                                      MemOperand* context_restore_operand) {
   5050   Isolate* isolate = masm->isolate();
   5051   ExternalReference next_address =
   5052       ExternalReference::handle_scope_next_address(isolate);
   5053   const int kNextOffset = 0;
   5054   const int kLimitOffset = AddressOffset(
   5055       ExternalReference::handle_scope_limit_address(isolate), next_address);
   5056   const int kLevelOffset = AddressOffset(
   5057       ExternalReference::handle_scope_level_address(isolate), next_address);
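           // The limit and level fields live at fixed offsets from the next field,
           // so a single base register (r9 below) can address all three.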
   5058 
   5059   DCHECK(function_address.is(r1) || function_address.is(r2));
   5060 
   5061   Label profiler_disabled;
   5062   Label end_profiler_check;
   5063   __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
   5064   __ ldrb(r9, MemOperand(r9, 0));
   5065   __ cmp(r9, Operand(0));
   5066   __ b(eq, &profiler_disabled);
   5067 
   5068   // Additional parameter is the address of the actual callback.
   5069   __ mov(r3, Operand(thunk_ref));
   5070   __ jmp(&end_profiler_check);
   5071 
   5072   __ bind(&profiler_disabled);
   5073   __ Move(r3, function_address);
   5074   __ bind(&end_profiler_check);
   5075 
   5076   // Allocate HandleScope in callee-save registers.
   5077   __ mov(r9, Operand(next_address));
   5078   __ ldr(r4, MemOperand(r9, kNextOffset));
   5079   __ ldr(r5, MemOperand(r9, kLimitOffset));
   5080   __ ldr(r6, MemOperand(r9, kLevelOffset));
   5081   __ add(r6, r6, Operand(1));
   5082   __ str(r6, MemOperand(r9, kLevelOffset));
   5083 
   5084   if (FLAG_log_timer_events) {
   5085     FrameScope frame(masm, StackFrame::MANUAL);
   5086     __ PushSafepointRegisters();
   5087     __ PrepareCallCFunction(1, r0);
   5088     __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
   5089     __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
   5090                      1);
   5091     __ PopSafepointRegisters();
   5092   }
   5093 
   5094   // Native call returns to the DirectCEntry stub which redirects to the
   5095   // return address pushed on stack (could have moved after GC).
   5096   // DirectCEntry stub itself is generated early and never moves.
   5097   DirectCEntryStub stub(isolate);
   5098   stub.GenerateCall(masm, r3);
   5099 
   5100   if (FLAG_log_timer_events) {
   5101     FrameScope frame(masm, StackFrame::MANUAL);
   5102     __ PushSafepointRegisters();
   5103     __ PrepareCallCFunction(1, r0);
   5104     __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
   5105     __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
   5106                      1);
   5107     __ PopSafepointRegisters();
   5108   }
   5109 
   5110   Label promote_scheduled_exception;
   5111   Label delete_allocated_handles;
   5112   Label leave_exit_frame;
   5113   Label return_value_loaded;
   5114 
    5115   // Load the value from ReturnValue.
   5116   __ ldr(r0, return_value_operand);
   5117   __ bind(&return_value_loaded);
   5118   // No more valid handles (the result handle was the last one). Restore
   5119   // previous handle scope.
   5120   __ str(r4, MemOperand(r9, kNextOffset));
   5121   if (__ emit_debug_code()) {
   5122     __ ldr(r1, MemOperand(r9, kLevelOffset));
   5123     __ cmp(r1, r6);
   5124     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
   5125   }
   5126   __ sub(r6, r6, Operand(1));
   5127   __ str(r6, MemOperand(r9, kLevelOffset));
   5128   __ ldr(ip, MemOperand(r9, kLimitOffset));
   5129   __ cmp(r5, ip);
   5130   __ b(ne, &delete_allocated_handles);
   5131 
   5132   // Leave the API exit frame.
   5133   __ bind(&leave_exit_frame);
   5134   bool restore_context = context_restore_operand != NULL;
   5135   if (restore_context) {
   5136     __ ldr(cp, *context_restore_operand);
   5137   }
   5138   // LeaveExitFrame expects unwind space to be in a register.
   5139   if (stack_space_operand != NULL) {
   5140     __ ldr(r4, *stack_space_operand);
   5141   } else {
   5142     __ mov(r4, Operand(stack_space));
   5143   }
   5144   __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
   5145 
   5146   // Check if the function scheduled an exception.
   5147   __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
   5148   __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
   5149   __ ldr(r5, MemOperand(ip));
   5150   __ cmp(r4, r5);
   5151   __ b(ne, &promote_scheduled_exception);
   5152 
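           // No exception was scheduled; return to the caller.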
   5153   __ mov(pc, lr);
   5154 
   5155   // Re-throw by promoting a scheduled exception.
   5156   __ bind(&promote_scheduled_exception);
   5157   __ TailCallRuntime(Runtime::kPromoteScheduledException);
   5158 
   5159   // HandleScope limit has changed. Delete allocated extensions.
   5160   __ bind(&delete_allocated_handles);
   5161   __ str(r5, MemOperand(r9, kLimitOffset));
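           // Preserve the return value in r4 across the C call below.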
   5162   __ mov(r4, r0);
   5163   __ PrepareCallCFunction(1, r5);
   5164   __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
   5165   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
   5166                    1);
   5167   __ mov(r0, r4);
   5168   __ jmp(&leave_exit_frame);
   5169 }
   5170 
   5171 void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   5172   // ----------- S t a t e -------------
   5173   //  -- r0                  : callee
   5174   //  -- r4                  : call_data
   5175   //  -- r2                  : holder
   5176   //  -- r1                  : api_function_address
   5177   //  -- cp                  : context
   5178   //  --
   5179   //  -- sp[0]               : last argument
   5180   //  -- ...
    5181   //  -- sp[(argc - 1) * 4]  : first argument
   5182   //  -- sp[argc * 4]        : receiver
   5183   // -----------------------------------
   5184 
   5185   Register callee = r0;
   5186   Register call_data = r4;
   5187   Register holder = r2;
   5188   Register api_function_address = r1;
   5189   Register context = cp;
   5190 
   5191   typedef FunctionCallbackArguments FCA;
   5192 
   5193   STATIC_ASSERT(FCA::kContextSaveIndex == 6);
   5194   STATIC_ASSERT(FCA::kCalleeIndex == 5);
   5195   STATIC_ASSERT(FCA::kDataIndex == 4);
   5196   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
   5197   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   5198   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   5199   STATIC_ASSERT(FCA::kHolderIndex == 0);
   5200   STATIC_ASSERT(FCA::kNewTargetIndex == 7);
   5201   STATIC_ASSERT(FCA::kArgsLength == 8);
   5202 
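           // Push the FunctionCallbackArguments fields from the highest index
           // (new.target) down to the lowest (holder), matching the indices asserted above.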
   5203   // new target
   5204   __ PushRoot(Heap::kUndefinedValueRootIndex);
   5205 
   5206   // context save
   5207   __ push(context);
   5208   if (!is_lazy()) {
   5209     // load context from callee
   5210     __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   5211   }
   5212 
   5213   // callee
   5214   __ push(callee);
   5215 
   5216   // call data
   5217   __ push(call_data);
   5218 
   5219   Register scratch = call_data;
   5220   if (!call_data_undefined()) {
   5221     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   5222   }
   5223   // return value
   5224   __ push(scratch);
   5225   // return value default
   5226   __ push(scratch);
   5227   // isolate
   5228   __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
   5229   __ push(scratch);
   5230   // holder
   5231   __ push(holder);
   5232 
   5233   // Prepare arguments.
   5234   __ mov(scratch, sp);
   5235 
    5236   // Allocate the v8::FunctionCallbackInfo structure in the exit frame's extra
    5237   // slots, since that space is not controlled by the GC.
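           // The extra exit frame slots hold the FunctionCallbackInfo fields
           // (implicit_args_, values_ and length_) written below.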
   5238   const int kApiStackSpace = 3;
   5239 
   5240   FrameScope frame_scope(masm, StackFrame::MANUAL);
   5241   __ EnterExitFrame(false, kApiStackSpace);
   5242 
   5243   DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
   5244   // r0 = FunctionCallbackInfo&
    5245   // The FunctionCallbackInfo struct starts one slot above the return address.
   5246   __ add(r0, sp, Operand(1 * kPointerSize));
   5247   // FunctionCallbackInfo::implicit_args_
   5248   __ str(scratch, MemOperand(r0, 0 * kPointerSize));
   5249   // FunctionCallbackInfo::values_
   5250   __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
   5251   __ str(ip, MemOperand(r0, 1 * kPointerSize));
   5252   // FunctionCallbackInfo::length_ = argc
   5253   __ mov(ip, Operand(argc()));
   5254   __ str(ip, MemOperand(r0, 2 * kPointerSize));
   5255 
   5256   ExternalReference thunk_ref =
   5257       ExternalReference::invoke_function_callback(masm->isolate());
   5258 
   5259   AllowExternalCallThatCantCauseGC scope(masm);
   5260   MemOperand context_restore_operand(
   5261       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
    5262   // Store callbacks return the value of the first JS argument (the stored value).
   5263   int return_value_offset = 0;
   5264   if (is_store()) {
   5265     return_value_offset = 2 + FCA::kArgsLength;
   5266   } else {
   5267     return_value_offset = 2 + FCA::kReturnValueOffset;
   5268   }
   5269   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
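           // The stack to unwind is known statically here (the implicit arguments, the
           // JS arguments and the receiver), so a constant is passed and
           // stack_space_operand is left as NULL.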
   5270   int stack_space = 0;
   5271   MemOperand length_operand = MemOperand(sp, 3 * kPointerSize);
   5272   MemOperand* stack_space_operand = &length_operand;
   5273   stack_space = argc() + FCA::kArgsLength + 1;
   5274   stack_space_operand = NULL;
   5275 
   5276   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
   5277                            stack_space_operand, return_value_operand,
   5278                            &context_restore_operand);
   5279 }
   5280 
   5281 
   5282 void CallApiGetterStub::Generate(MacroAssembler* masm) {
    5283   // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
    5284   // property name below the exit frame so that the GC is aware of them.
   5285   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
   5286   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
   5287   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
   5288   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
   5289   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
   5290   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
   5291   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
   5292   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
   5293 
   5294   Register receiver = ApiGetterDescriptor::ReceiverRegister();
   5295   Register holder = ApiGetterDescriptor::HolderRegister();
   5296   Register callback = ApiGetterDescriptor::CallbackRegister();
   5297   Register scratch = r4;
   5298   DCHECK(!AreAliased(receiver, holder, callback, scratch));
   5299 
   5300   Register api_function_address = r2;
   5301 
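           // Push the PropertyCallbackArguments fields from the highest index (the
           // receiver) down to index 0 (should_throw_on_error), then the name handle.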
   5302   __ push(receiver);
   5303   // Push data from AccessorInfo.
   5304   __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
   5305   __ push(scratch);
   5306   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   5307   __ Push(scratch, scratch);
   5308   __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
   5309   __ Push(scratch, holder);
   5310   __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
   5311   __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
   5312   __ push(scratch);
   5313   // v8::PropertyCallbackInfo::args_ array and name handle.
   5314   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
   5315 
   5316   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
   5317   __ mov(r0, sp);                             // r0 = Handle<Name>
   5318   __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = v8::PCI::args_
   5319 
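           // The extra exit frame space holds the v8::PropertyCallbackInfo::args_
           // pointer stored below.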
   5320   const int kApiStackSpace = 1;
   5321   FrameScope frame_scope(masm, StackFrame::MANUAL);
   5322   __ EnterExitFrame(false, kApiStackSpace);
   5323 
    5324   // Create the v8::PropertyCallbackInfo object on the stack and initialize
    5325   // its args_ field.
   5326   __ str(r1, MemOperand(sp, 1 * kPointerSize));
   5327   __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = v8::PropertyCallbackInfo&
   5328 
   5329   ExternalReference thunk_ref =
   5330       ExternalReference::invoke_accessor_getter_callback(isolate());
   5331 
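           // The C++ getter is wrapped in a Foreign object; extract its raw address.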
   5332   __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
   5333   __ ldr(api_function_address,
   5334          FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
   5335 
    5336   // +3 skips the frame prologue, the return address and the name handle.
   5337   MemOperand return_value_operand(
   5338       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   5339   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
   5340                            kStackUnwindSpace, NULL, return_value_operand, NULL);
   5341 }
   5342 
   5343 #undef __
   5344 
   5345 }  // namespace internal
   5346 }  // namespace v8
   5347 
   5348 #endif  // V8_TARGET_ARCH_ARM
   5349