// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           a0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments, adjust sp.
    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sd(descriptor.GetRegisterParameter(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}
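

// Illustrative sketch (not part of the original file): the store loop above
// reproduces the layout of pushing the register parameters in order, so
// parameter 0 ends up farthest from sp and the last parameter at the top of
// the stack. A minimal model of the index computation, with an assumed
// 8-byte slot standing in for kPointerSize on mips64:
static inline int LightweightMissSlotOffset(int param_count, int i) {
  const int kSlotSize = 8;  // Assumption: 64-bit pointers.
  // Parameter i lands (param_count - 1 - i) slots above the adjusted sp.
  return (param_count - 1 - i) * kSlotSize;
}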


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);
  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32-bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}
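

// Illustrative sketch (not part of the original file): the manual truncation
// performed above, expressed in C++ over the raw IEEE-754 bits. It keeps the
// low 32 bits of the truncated integer value, as ECMAScript ToInt32 requires;
// the bit-layout constants are assumptions matching the comments above.
static inline int32_t TruncateDoubleBitsToInt32(uint64_t bits) {
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF);
  uint64_t mantissa = bits & ((static_cast<uint64_t>(1) << 52) - 1);
  bool negative = (bits >> 63) != 0;
  if (exponent == 0x7FF) return 0;  // Infinity and NaN convert to 0.
  if (exponent < 1023) return 0;    // |value| < 1 truncates to 0.
  mantissa |= static_cast<uint64_t>(1) << 52;  // Add the implicit leading 1.
  int shift = exponent - 1023 - 52;  // Unbiased exponent minus mantissa bits.
  uint32_t result;
  if (shift >= 32) {
    result = 0;  // All significant bits are shifted past bit 31.
  } else if (shift >= 0) {
    result = static_cast<uint32_t>(mantissa << shift);
  } else {
    result = static_cast<uint32_t>(mantissa >> -shift);  // -shift <= 52 here.
  }
  return negative ? static_cast<int32_t>(0u - result)
                  : static_cast<int32_t>(result);
}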


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc, Strength strength) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t1;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
    288   // Smis. If it's not a heap number, then return equal.
    289   __ GetObjectType(a0, t0, t0);
    290   if (cc == less || cc == greater) {
    291     // Call runtime on identical JSObjects.
    292     __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
    293     // Call runtime on identical symbols since we need to throw a TypeError.
    294     __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
    295     // Call runtime on identical SIMD values since we must throw a TypeError.
    296     __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
    297     if (is_strong(strength)) {
    298       // Call the runtime on anything that is converted in the semantics, since
    299       // we need to throw a TypeError. Smis have already been ruled out.
    300       __ Branch(&return_equal, eq, t0, Operand(HEAP_NUMBER_TYPE));
    301       __ And(t0, t0, Operand(kIsNotStringMask));
    302       __ Branch(slow, ne, t0, Operand(zero_reg));
    303     }
    304   } else {
    305     __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
    306     // Comparing JS objects with <=, >= is complicated.
    307     if (cc != eq) {
    308       __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
    309       // Call runtime on identical symbols since we need to throw a TypeError.
    310       __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
    311       // Call runtime on identical SIMD values since we must throw a TypeError.
    312       __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
    313       if (is_strong(strength)) {
    314         // Call the runtime on anything that is converted in the semantics,
    315         // since we need to throw a TypeError. Smis and heap numbers have
    316         // already been ruled out.
    317         __ And(t0, t0, Operand(kIsNotStringMask));
    318         __ Branch(slow, ne, t0, Operand(zero_reg));
    319       }
    320       // Normally here we fall through to return_equal, but undefined is
    321       // special: (undefined == undefined) == true, but
    322       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
    323       if (cc == less_equal || cc == greater_equal) {
    324         __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
    325         __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
    326         __ Branch(&return_equal, ne, a0, Operand(a6));
    327         DCHECK(is_int16(GREATER) && is_int16(LESS));
    328         __ Ret(USE_DELAY_SLOT);
    329         if (cc == le) {
    330           // undefined <= undefined should fail.
    331           __ li(v0, Operand(GREATER));
    332         } else  {
    333           // undefined >= undefined should fail.
    334           __ li(v0, Operand(LESS));
    335         }
    336       }
    337     }
    338   }
    339 
    340   __ bind(&return_equal);
    341   DCHECK(is_int16(GREATER) && is_int16(LESS));
    342   __ Ret(USE_DELAY_SLOT);
    343   if (cc == less) {
    344     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
    345   } else if (cc == greater) {
    346     __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
    347   } else {
    348     __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
    349   }
    350   // For less and greater we don't have to check for NaN since the result of
    351   // x < x is false regardless.  For the others here is some code to check
    352   // for NaN.
    353   if (cc != lt && cc != gt) {
    354     __ bind(&heap_number);
    355     // It is a heap number, so return non-equal if it's NaN and equal if it's
    356     // not NaN.
    357 
    358     // The representation of NaN values has all exponent bits (52..62) set,
    359     // and not all mantissa bits (0..51) clear.
    360     // Read top bits of double representation (second word of value).
    361     __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    362     // Test that exponent bits are all set.
    363     __ And(a7, a6, Operand(exp_mask_reg));
    // If the exponent bits are not all set (ne condition), this is not a NaN
    // and the objects are equal.
    __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, a7, Operand(a6));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}
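

// Illustrative sketch (not part of the original file): the NaN test emitted
// above. A double is NaN iff all eleven exponent bits are set and at least
// one mantissa bit is set; the stub checks the exponent in the upper word,
// then ORs in the remaining mantissa bits. The mask values below assume the
// standard IEEE-754 layout described in the comments.
static inline bool IsNaNBits(uint32_t hi, uint32_t lo) {
  const uint32_t kExpMask = 0x7FF00000u;         // Exponent bits, upper word.
  const uint32_t kHiMantissaMask = 0x000FFFFFu;  // Mantissa bits, upper word.
  if ((hi & kExpMask) != kExpMask) return false;  // Exponent not all set.
  return ((hi & kHiMantissaMask) | lo) != 0;      // Any mantissa bit set.
}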


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }
  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ SmiUntag(at, rhs);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t0, t0);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ SmiUntag(at, lhs);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}
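

// Illustrative sketch (not part of the original file): what the SmiUntag +
// mtc1 + cvt_d_w sequence above computes. The assumption here is the 64-bit
// Smi layout used on mips64 at this point, where the 32-bit payload lives in
// the upper word of the tagged value (kSmiShift == 32).
static inline double SmiBitsToDouble(int64_t tagged) {
  int32_t untagged = static_cast<int32_t>(tagged >> 32);  // SmiUntag.
  return static_cast<double>(untagged);                   // cvt_d_w.
}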


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(a2, a2, Operand(a3));
  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is the object type of lhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized strings. We already checked they weren't the same
  // pointer so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));   // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  // If both objects are undetectable, they are equal.  Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}
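

// Illustrative sketch (not part of the original file): the final
// undetectable-object check above. ANDing the two map bit fields and XORing
// with the undetectable bit leaves 0 (meaning "equal") in v0 only when both
// maps have Map::kIsUndetectable set; the bit position used below is an
// assumption for illustration only.
static inline int UndetectableEqualityResult(uint32_t lhs_bit_field,
                                             uint32_t rhs_bit_field) {
  const uint32_t kUndetectableBit = 1u << 4;  // Assumed bit position.
  uint32_t both = lhs_bit_field & rhs_bit_field & kUndetectableBit;
  return static_cast<int>(both ^ kUndetectableBit);  // 0 means "equal".
}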


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ SmiUntag(a1);
  __ SmiUntag(a0);

  __ Ret(USE_DELAY_SLOT);
  __ dsubu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc, strength());

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  __ And(a6, lhs, Operand(rhs));
  __ JumpIfNotSmi(a6, &not_smis, a4);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Label nan;
  __ li(a4, Operand(LESS));
  __ li(a5, Operand(GREATER));
  __ li(a6, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (kArchVariant != kMips64r6) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, a4);
    // Use the previous check to conditionally store the opposite result
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
    // next check.
    __ Movf(v0, a5);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, a6);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, a4);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, a6);  // Return EQUAL as result.

    __ mov(v0, a5);  // Return GREATER as result.
    __ bind(&skip);
  }
  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }


  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
                                                    a5);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
  // a0 (rhs) second.
  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  if (cc == eq) {
    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
  } else {
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
                                             : Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}
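

// Illustrative sketch (not part of the original file): the two-smi fast path
// at the top of GenerateGeneric. After untagging, both values fit in 32 bits,
// so the 64-bit subtraction below cannot overflow, and its sign is exactly
// the comparison result the stub returns in v0.
static inline int64_t CompareUntaggedSmis(int32_t lhs, int32_t rhs) {
  // Negative: lhs < rhs; zero: equal; positive: lhs > rhs.
  return static_cast<int64_t>(lhs) - static_cast<int64_t>(rhs);
}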


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = a1;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const Register heapnumbermap = a5;
  const Register heapnumber = v0;
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t1;
  const Register scratch2 = a7;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ ld(base, MemOperand(sp, 1 * kPointerSize));
    __ ld(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));

    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ mtc1(scratch, single_scratch);
    __ cvt_d_w(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    if (exponent_type() == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ Move(double_scratch, 0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &not_plus_half,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ neg_d(double_result, double_scratch);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ sqrt_d(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ Move(double_scratch, -0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &call_runtime,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ Move(double_result, kDoubleRegZero);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ Move(double_result, 1.);
      __ sqrt_d(double_scratch, double_scratch);
      __ div_d(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Dsubu(scratch, zero_reg, scratch);
  __ bind(&positive_exponent);

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ dsra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ sdc1(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(v0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ DropAndRet(2);
  } else {
    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}
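

// Illustrative sketch (not part of the original file): the integer-exponent
// loop above is exponentiation by squaring on the absolute value of the
// exponent, with a final reciprocal for negative exponents. The stub also
// bails out when that division underflows to zero; this sketch does not
// model the bailout (and, being a sketch, it ignores INT_MIN negation).
static inline double PowIntSketch(double base, int exponent) {
  double result = 1.0;
  double b = base;
  int e = exponent < 0 ? -exponent : exponent;
  while (e != 0) {
    if ((e & 1) != 0) result *= b;  // Multiply in the current bit's power.
    e >>= 1;
    if (e != 0) b *= b;             // Square for the next bit.
  }
  return exponent < 0 ? 1.0 / result : result;
}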


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // a2: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(s1, a2);
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ dsll(s1, a0, kPointerSizeLog2);
    __ Daddu(s1, sp, s1);
    __ Dsubu(s1, s1, kPointerSize);
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles());

  // s0: number of arguments  including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);
  // a1 = argv (set in the delay slot after find_ra below).

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    // This branch-and-link sequence is needed to find the current PC on mips,
    // saved to the ra register.
    // Use masm-> here instead of the double-underscore macro since extra
    // coverage code can interfere with the proper calculation of ra.
    Label find_ra;
    masm->bal(&find_ra);  // bal exposes branch delay slot.
    masm->mov(a1, s1);
    masm->bind(&find_ra);

    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    const int kNumInstructionsToJump = 5;
    masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
    masm->sd(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    masm->jalr(t9);
    // Set up sp in the delay slot.
    masm->daddiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(a4, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, a4, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ ld(a2, MemOperand(a2));
    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
    // Cannot use Check here, as it attempts to generate a call into the
    // runtime.
    __ Branch(&okay, eq, a4, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // s0: still holds argc (callee-saved).
    argc = s0;
  }
  __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ ld(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ ld(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ ld(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ ld(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ ld(a2, MemOperand(a2));
  __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Daddu(t9, a1, a2);
  __ Jump(t9);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // TODO(plind): unify the ABI description here.
  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  // a4: argv (on mips64)

  // Stack:
  // 0 arg slots on mips64 (4 arg slots on mips)
  // args -- in a4 on mips64, on the stack on mips

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);

  // Load argv in s0 register.
  if (kMipsAbi == kN64) {
    __ mov(s0, a4);  // 5th parameter is in the a4 register on mips64.
  } else {  // ABI O32.
   1252     // 5th parameter on stack for O32 abi.
   1253     int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   1254     offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
   1255     __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
   1256   }
   1257 
   1258   __ InitializeRootRegister();
   1259 
   1260   // We build an EntryFrame.
   1261   __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   1262   int marker = type();
   1263   __ li(a6, Operand(Smi::FromInt(marker)));
   1264   __ li(a5, Operand(Smi::FromInt(marker)));
   1265   ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
   1266   __ li(a4, Operand(c_entry_fp));
   1267   __ ld(a4, MemOperand(a4));
   1268   __ Push(a7, a6, a5, a4);
   1269   // Set up frame pointer for the frame to be pushed.
   1270   __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
   1271 
   1272   // Registers:
   1273   // a0: entry_address
   1274   // a1: function
   1275   // a2: receiver_pointer
   1276   // a3: argc
   1277   // s0: argv
   1278   //
   1279   // Stack:
   1280   // caller fp          |
   1281   // function slot      | entry frame
   1282   // context slot       |
   1283   // bad fp (0xff...f)  |
   1284   // callee saved registers + ra
   1285   // [ O32: 4 args slots]
   1286   // args
   1287 
   1288   // If this is the outermost JS call, set js_entry_sp value.
   1289   Label non_outermost_js;
   1290   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
   1291   __ li(a5, Operand(ExternalReference(js_entry_sp)));
   1292   __ ld(a6, MemOperand(a5));
   1293   __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
   1294   __ sd(fp, MemOperand(a5));
   1295   __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   1296   Label cont;
   1297   __ b(&cont);
   1298   __ nop();   // Branch delay slot nop.
   1299   __ bind(&non_outermost_js);
   1300   __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   1301   __ bind(&cont);
   1302   __ push(a4);
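          // In C-like terms, the marker selection above is (a sketch):
          //   if (*js_entry_sp == 0) {  // Outermost JS call.
          //     *js_entry_sp = fp;
          //     push(Smi(StackFrame::OUTERMOST_JSENTRY_FRAME));
          //   } else {
          //     push(Smi(StackFrame::INNER_JSENTRY_FRAME));
          //   }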
   1303 
   1304   // Jump to a faked try block that does the invoke, with a faked catch
   1305   // block that sets the pending exception.
   1306   __ jmp(&invoke);
   1307   __ bind(&handler_entry);
   1308   handler_offset_ = handler_entry.pos();
   1309   // Caught exception: Store result (exception) in the pending exception
   1310   // field in the JSEnv and return a failure sentinel.  Coming in here the
   1311   // fp will be invalid because the PushStackHandler below sets it to 0 to
   1312   // signal the existence of the JSEntry frame.
   1313   __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1314                                       isolate)));
   1315   __ sd(v0, MemOperand(a4));  // We come back from 'invoke'. result is in v0.
   1316   __ LoadRoot(v0, Heap::kExceptionRootIndex);
   1317   __ b(&exit);  // b exposes branch delay slot.
   1318   __ nop();   // Branch delay slot nop.
   1319 
   1320   // Invoke: Link this frame into the handler chain.
   1321   __ bind(&invoke);
   1322   __ PushStackHandler();
   1323   // If an exception not caught by another handler occurs, this handler
   1324   // returns control to the code after the bal(&invoke) above, which
   1325   // restores all kCalleeSaved registers (including cp and fp) to their
   1326   // saved values before returning a failure to C.
   1327 
   1328   // Clear any pending exceptions.
   1329   __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
   1330   __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1331                                       isolate)));
   1332   __ sd(a5, MemOperand(a4));
   1333 
   1334   // Invoke the function by calling through JS entry trampoline builtin.
   1335   // Notice that we cannot store a reference to the trampoline code directly in
   1336   // this stub, because runtime stubs are not traversed when doing GC.
   1337 
   1338   // Registers:
   1339   // a0: entry_address
   1340   // a1: function
   1341   // a2: receiver_pointer
   1342   // a3: argc
   1343   // s0: argv
   1344   //
   1345   // Stack:
   1346   // handler frame
   1347   // entry frame
   1348   // callee saved registers + ra
   1349   // [ O32: 4 args slots]
   1350   // args
   1351 
   1352   if (type() == StackFrame::ENTRY_CONSTRUCT) {
   1353     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
   1354                                       isolate);
   1355     __ li(a4, Operand(construct_entry));
   1356   } else {
   1357     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
   1358     __ li(a4, Operand(entry));
   1359   }
   1360   __ ld(t9, MemOperand(a4));  // Deref address.
   1361   // Call JSEntryTrampoline.
   1362   __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
   1363   __ Call(t9);
   1364 
   1365   // Unlink this frame from the handler chain.
   1366   __ PopStackHandler();
   1367 
   1368   __ bind(&exit);  // v0 holds result
   1369   // Check if the current stack frame is marked as the outermost JS frame.
   1370   Label non_outermost_js_2;
   1371   __ pop(a5);
   1372   __ Branch(&non_outermost_js_2,
   1373             ne,
   1374             a5,
   1375             Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   1376   __ li(a5, Operand(ExternalReference(js_entry_sp)));
   1377   __ sd(zero_reg, MemOperand(a5));
   1378   __ bind(&non_outermost_js_2);
   1379 
   1380   // Restore the top frame descriptors from the stack.
   1381   __ pop(a5);
   1382   __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
   1383                                       isolate)));
   1384   __ sd(a5, MemOperand(a4));
   1385 
   1386   // Reset the stack to the callee saved registers.
   1387   __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
   1388 
   1389   // Restore callee-saved fpu registers.
   1390   __ MultiPopFPU(kCalleeSavedFPU);
   1391 
   1392   // Restore callee saved registers from the stack.
   1393   __ MultiPop(kCalleeSaved | ra.bit());
   1394   // Return.
   1395   __ Jump(ra);
   1396 }
   1397 
   1398 
   1399 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   1400   // Return address is in ra.
   1401   Label miss;
   1402 
   1403   Register receiver = LoadDescriptor::ReceiverRegister();
   1404   Register index = LoadDescriptor::NameRegister();
   1405   Register scratch = a5;
   1406   Register result = v0;
   1407   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   1408   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
   1409 
   1410   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
   1411                                           &miss,  // When not a string.
   1412                                           &miss,  // When not a number.
   1413                                           &miss,  // When index out of range.
   1414                                           STRING_INDEX_IS_ARRAY_INDEX,
   1415                                           RECEIVER_IS_STRING);
   1416   char_at_generator.GenerateFast(masm);
   1417   __ Ret();
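          // JS-level illustration (a sketch) of the fast path above:
          //   var s = "abc";
          //   s[1];  // Keyed load: string receiver, smi index -> "b".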
   1418 
   1419   StubRuntimeCallHelper call_helper;
   1420   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
   1421 
   1422   __ bind(&miss);
   1423   PropertyAccessCompiler::TailCallBuiltin(
   1424       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
   1425 }
   1426 
   1427 
   1428 void InstanceOfStub::Generate(MacroAssembler* masm) {
   1429   Register const object = a1;              // Object (lhs).
   1430   Register const function = a0;            // Function (rhs).
   1431   Register const object_map = a2;          // Map of {object}.
   1432   Register const function_map = a3;        // Map of {function}.
   1433   Register const function_prototype = a4;  // Prototype of {function}.
   1434   Register const scratch = a5;
   1435 
   1436   DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
   1437   DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
   1438 
   1439   // Check if {object} is a smi.
   1440   Label object_is_smi;
   1441   __ JumpIfSmi(object, &object_is_smi);
   1442 
   1443   // Lookup the {function} and the {object} map in the global instanceof cache.
   1444   // Note: This is safe because we clear the global instanceof cache whenever
   1445   // we change the prototype of any object.
   1446   Label fast_case, slow_case;
   1447   __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   1448   __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
   1449   __ Branch(&fast_case, ne, function, Operand(at));
   1450   __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
   1451   __ Branch(&fast_case, ne, object_map, Operand(at));
   1452   __ Ret(USE_DELAY_SLOT);
   1453   __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
   1454 
   1455   // If {object} is a smi we can safely return false if {function} is a JS
   1456   // function, otherwise we have to miss to the runtime and throw an exception.
   1457   __ bind(&object_is_smi);
   1458   __ JumpIfSmi(function, &slow_case);
   1459   __ GetObjectType(function, function_map, scratch);
   1460   __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
   1461   __ Ret(USE_DELAY_SLOT);
   1462   __ LoadRoot(v0, Heap::kFalseValueRootIndex);  // In delay slot.
   1463 
   1464   // Fast-case: The {function} must be a valid JSFunction.
   1465   __ bind(&fast_case);
   1466   __ JumpIfSmi(function, &slow_case);
   1467   __ GetObjectType(function, function_map, scratch);
   1468   __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
   1469 
   1470   // Ensure that {function} has an instance prototype.
   1471   __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
   1472   __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
   1473   __ Branch(&slow_case, ne, at, Operand(zero_reg));
   1474 
   1475   // Get the "prototype" (or initial map) of the {function}.
   1476   __ ld(function_prototype,
   1477         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   1478   __ AssertNotSmi(function_prototype);
   1479 
   1480   // Resolve the prototype if the {function} has an initial map.  Afterwards the
   1481   // {function_prototype} will be either the JSReceiver prototype object or the
   1482   // hole value, which means that no instances of the {function} were created so
   1483   // far and hence we should return false.
   1484   Label function_prototype_valid;
   1485   __ GetObjectType(function_prototype, scratch, scratch);
   1486   __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
   1487   __ ld(function_prototype,
   1488         FieldMemOperand(function_prototype, Map::kPrototypeOffset));
   1489   __ bind(&function_prototype_valid);
   1490   __ AssertNotSmi(function_prototype);
   1491 
   1492   // Update the global instanceof cache with the current {object} map and
   1493   // {function}.  The cached answer will be set when it is known below.
   1494   __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
   1495   __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
   1496 
   1497   // Loop through the prototype chain looking for the {function} prototype.
   1498   // Assume true, and change to false if not found.
   1499   Register const object_instance_type = function_map;
   1500   Register const map_bit_field = function_map;
   1501   Register const null = scratch;
   1502   Register const result = v0;
   1503 
   1504   Label done, loop, fast_runtime_fallback;
   1505   __ LoadRoot(result, Heap::kTrueValueRootIndex);
   1506   __ LoadRoot(null, Heap::kNullValueRootIndex);
   1507   __ bind(&loop);
   1508 
   1509   // Check if the object needs to be access checked.
   1510   __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
   1511   __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
   1512   __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
   1513   // Check if the current object is a Proxy.
   1514   __ lbu(object_instance_type,
   1515          FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   1516   __ Branch(&fast_runtime_fallback, eq, object_instance_type,
   1517             Operand(JS_PROXY_TYPE));
   1518 
   1519   __ ld(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
   1520   __ Branch(&done, eq, object, Operand(function_prototype));
   1521   __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
   1522   __ ld(object_map,
   1523         FieldMemOperand(object, HeapObject::kMapOffset));  // In delay slot.
   1524   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   1525   __ bind(&done);
   1526   __ Ret(USE_DELAY_SLOT);
   1527   __ StoreRoot(result,
   1528                Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
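          // The chain walk above corresponds roughly to this pseudocode (the
          // proxy and access-check cases fall through to the runtime below):
          //   map = object->map;
          //   for (;;) {
          //     if (map->access_check_needed || map->IsJSProxy())
          //       return %HasInPrototypeChain(object, function_prototype);
          //     object = map->prototype;
          //     if (object == function_prototype) return true;   // Cached.
          //     if (object == null) return false;                // Cached.
          //     map = object->map;
          //   }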
   1529 
   1530   // Found Proxy or access check needed: Call the runtime
   1531   __ bind(&fast_runtime_fallback);
   1532   __ Push(object, function_prototype);
   1533   // Invalidate the instanceof cache.
   1534   DCHECK(Smi::FromInt(0) == 0);
   1535   __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
   1536   __ TailCallRuntime(Runtime::kHasInPrototypeChain);
   1537 
   1538   // Slow-case: Call the %InstanceOf runtime function.
   1539   __ bind(&slow_case);
   1540   __ Push(object, function);
   1541   __ TailCallRuntime(Runtime::kInstanceOf);
   1542 }
   1543 
   1544 
   1545 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   1546   Label miss;
   1547   Register receiver = LoadDescriptor::ReceiverRegister();
   1548   // Ensure that the vector and slot registers won't be clobbered before
   1549   // calling the miss handler.
   1550   DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::VectorRegister(),
   1551                      LoadWithVectorDescriptor::SlotRegister()));
   1552 
   1553   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
   1554                                                           a5, &miss);
   1555   __ bind(&miss);
   1556   PropertyAccessCompiler::TailCallBuiltin(
   1557       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
   1558 }
   1559 
   1560 
   1561 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   1562   // The displacement is the offset of the last parameter (if any)
   1563   // relative to the frame pointer.
   1564   const int kDisplacement =
   1565       StandardFrameConstants::kCallerSPOffset - kPointerSize;
   1566   DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
   1567   DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
   1568 
   1569   // Check that the key is a smi.
   1570   Label slow;
   1571   __ JumpIfNotSmi(a1, &slow);
   1572 
   1573   // Check if the calling frame is an arguments adaptor frame.
   1574   Label adaptor;
   1575   __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1576   __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
   1577   __ Branch(&adaptor,
   1578             eq,
   1579             a3,
   1580             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   1581 
   1582   // Check index (a1) against formal parameters count limit passed in
   1583   // through register a0. Use unsigned comparison to get negative
   1584   // check for free.
   1585   __ Branch(&slow, hs, a1, Operand(a0));
   1586 
   1587   // Read the argument from the stack and return it.
   1588   __ dsubu(a3, a0, a1);
   1589   __ SmiScale(a7, a3, kPointerSizeLog2);
   1590   __ Daddu(a3, fp, Operand(a7));
   1591   __ Ret(USE_DELAY_SLOT);
   1592   __ ld(v0, MemOperand(a3, kDisplacement));
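          // In effect (a sketch): the element is loaded from
          //   fp + (parameter_count - index) * kPointerSize + kDisplacement,
          // with the subtraction done on smis and SmiScale turning the smi
          // difference into a byte offset.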
   1593 
   1594   // Arguments adaptor case: Check index (a1) against actual arguments
   1595   // limit found in the arguments adaptor frame. Use unsigned
   1596   // comparison to get negative check for free.
   1597   __ bind(&adaptor);
   1598   __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1599   __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
   1600 
   1601   // Read the argument from the adaptor frame and return it.
   1602   __ dsubu(a3, a0, a1);
   1603   __ SmiScale(a7, a3, kPointerSizeLog2);
   1604   __ Daddu(a3, a2, Operand(a7));
   1605   __ Ret(USE_DELAY_SLOT);
   1606   __ ld(v0, MemOperand(a3, kDisplacement));
   1607 
   1608   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   1609   // by calling the runtime system.
   1610   __ bind(&slow);
   1611   __ push(a1);
   1612   __ TailCallRuntime(Runtime::kArguments);
   1613 }
   1614 
   1615 
   1616 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   1617   // a1 : function
   1618   // a2 : number of parameters (tagged)
   1619   // a3 : parameters pointer
   1620 
   1621   DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
   1622   DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
   1623   DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
   1624 
   1625   // Check if the calling frame is an arguments adaptor frame.
   1626   Label runtime;
   1627   __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1628   __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
   1629   __ Branch(&runtime, ne, a0,
   1630             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   1631 
   1632   // Patch the arguments.length and the parameters pointer in the current frame.
   1633   __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1634   __ SmiScale(a7, a2, kPointerSizeLog2);
   1635   __ Daddu(a4, a4, Operand(a7));
   1636   __ daddiu(a3, a4, StandardFrameConstants::kCallerSPOffset);
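          // In effect (a sketch): with the adaptor frame pointer in a4,
          //   a2 = adaptor frame's argument count (tagged), and
          //   a3 = a4 + argc * kPointerSize + kCallerSPOffset,
          // so a3 now addresses the arguments pushed by the original caller.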
   1637 
   1638   __ bind(&runtime);
   1639   __ Push(a1, a3, a2);
   1640   __ TailCallRuntime(Runtime::kNewSloppyArguments);
   1641 }
   1642 
   1643 
   1644 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   1645   // a1 : function
   1646   // a2 : number of parameters (tagged)
   1647   // a3 : parameters pointer
   1648   // Registers used over whole function:
   1649   //  a5 : arguments count (tagged)
   1650   //  a6 : mapped parameter count (tagged)
   1651 
   1652   DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
   1653   DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
   1654   DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
   1655 
   1656   // Check if the calling frame is an arguments adaptor frame.
   1657   Label adaptor_frame, try_allocate, runtime;
   1658   __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1659   __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
   1660   __ Branch(&adaptor_frame, eq, a0,
   1661             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   1662 
   1663   // No adaptor, parameter count = argument count.
   1664   __ mov(a5, a2);
   1665   __ Branch(USE_DELAY_SLOT, &try_allocate);
   1666   __ mov(a6, a2);  // In delay slot.
   1667 
   1668   // We have an adaptor frame. Patch the parameters pointer.
   1669   __ bind(&adaptor_frame);
   1670   __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1671   __ SmiScale(t2, a5, kPointerSizeLog2);
   1672   __ Daddu(a4, a4, Operand(t2));
   1673   __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
   1674 
   1675   // a5 = argument count (tagged)
   1676   // a6 = parameter count (tagged)
   1677   // Compute the mapped parameter count = min(a6, a5) in a6.
   1678   __ mov(a6, a2);
   1679   __ Branch(&try_allocate, le, a6, Operand(a5));
   1680   __ mov(a6, a5);
   1681 
   1682   __ bind(&try_allocate);
   1683 
   1684   // Compute the sizes of backing store, parameter map, and arguments object.
   1685   // 1. Parameter map, has 2 extra words containing context and backing store.
   1686   const int kParameterMapHeaderSize =
   1687       FixedArray::kHeaderSize + 2 * kPointerSize;
   1688   // If there are no mapped parameters, we do not need the parameter_map.
   1689   Label param_map_size;
   1690   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
   1691   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
   1692   __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
   1693   __ SmiScale(t1, a6, kPointerSizeLog2);
   1694   __ daddiu(t1, t1, kParameterMapHeaderSize);
   1695   __ bind(&param_map_size);
   1696 
   1697   // 2. Backing store.
   1698   __ SmiScale(t2, a5, kPointerSizeLog2);
   1699   __ Daddu(t1, t1, Operand(t2));
   1700   __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
   1701 
   1702   // 3. Arguments object.
   1703   __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
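          // Worked example (a sketch; kPointerSize is 8 on mips64): for 2
          // mapped parameters and 3 arguments, t1 now holds
          //   (kParameterMapHeaderSize + 2 * 8)     // parameter map
          //   + (FixedArray::kHeaderSize + 3 * 8)   // backing store
          //   + Heap::kSloppyArgumentsObjectSize.   // arguments object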
   1704 
   1705   // Do the allocation of all three objects in one go.
   1706   __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
   1707 
   1708   // v0 = address of new object(s) (tagged)
   1709   // a2 = argument count (smi-tagged)
   1710   // Get the arguments boilerplate from the current native context into a4.
   1711   const int kNormalOffset =
   1712       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   1713   const int kAliasedOffset =
   1714       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
   1715 
   1716   __ ld(a4, NativeContextMemOperand());
   1717   Label skip2_ne, skip2_eq;
   1718   __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
   1719   __ ld(a4, MemOperand(a4, kNormalOffset));
   1720   __ bind(&skip2_ne);
   1721 
   1722   __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
   1723   __ ld(a4, MemOperand(a4, kAliasedOffset));
   1724   __ bind(&skip2_eq);
   1725 
   1726   // v0 = address of new object (tagged)
   1727   // a2 = argument count (smi-tagged)
   1728   // a4 = address of arguments map (tagged)
   1729   // a6 = mapped parameter count (tagged)
   1730   __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
   1731   __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
   1732   __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   1733   __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
   1734 
   1735   // Set up the callee in-object property.
   1736   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   1737   __ AssertNotSmi(a1);
   1738   const int kCalleeOffset = JSObject::kHeaderSize +
   1739       Heap::kArgumentsCalleeIndex * kPointerSize;
   1740   __ sd(a1, FieldMemOperand(v0, kCalleeOffset));
   1741 
   1742   // Use the length (smi tagged) and set that as an in-object property too.
   1743   __ AssertSmi(a5);
   1744   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   1745   const int kLengthOffset = JSObject::kHeaderSize +
   1746       Heap::kArgumentsLengthIndex * kPointerSize;
   1747   __ sd(a5, FieldMemOperand(v0, kLengthOffset));
   1748 
   1749   // Set up the elements pointer in the allocated arguments object.
   1750   // If we allocated a parameter map, a4 will point there, otherwise
   1751   // it will point to the backing store.
   1752   __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
   1753   __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
   1754 
   1755   // v0 = address of new object (tagged)
   1756   // a2 = argument count (tagged)
   1757   // a4 = address of parameter map or backing store (tagged)
   1758   // a6 = mapped parameter count (tagged)
   1759   // Initialize parameter map. If there are no mapped arguments, we're done.
   1760   Label skip_parameter_map;
   1761   Label skip3;
   1762   __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
   1763   // Move backing store address to a1, because it is
   1764   // expected there when filling in the unmapped arguments.
   1765   __ mov(a1, a4);
   1766   __ bind(&skip3);
   1767 
   1768   __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
   1769 
   1770   __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
   1771   __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
   1772   __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
   1773   __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
   1774   __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
   1775   __ SmiScale(t2, a6, kPointerSizeLog2);
   1776   __ Daddu(a5, a4, Operand(t2));
   1777   __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
   1778   __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
   1779 
   1780   // Copy the parameter slots and the holes in the arguments.
   1781   // We need to fill in mapped_parameter_count slots. They index the context,
   1782   // where parameters are stored in reverse order, at
   1783   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
   1784   // The mapped parameter thus need to get indices
   1785   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
   1786   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
   1787   // We loop from right to left.
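          // Example: with parameter_count == 3 and mapped_parameter_count == 2,
          // the mapped slots receive the context indices MIN_CONTEXT_SLOTS + 2
          // and MIN_CONTEXT_SLOTS + 1, in that order.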
   1788   Label parameters_loop, parameters_test;
   1789   __ mov(a5, a6);
   1790   __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   1791   __ Dsubu(t1, t1, Operand(a6));
   1792   __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
   1793   __ SmiScale(t2, a5, kPointerSizeLog2);
   1794   __ Daddu(a1, a4, Operand(t2));
   1795   __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
   1796 
   1797   // a1 = address of backing store (tagged)
   1798   // a4 = address of parameter map (tagged)
   1799   // a0 = temporary scratch (among others, for address calculation)
   1800   // t1 = loop variable (tagged)
   1801   // a7 = the hole value
   1802   __ jmp(&parameters_test);
   1803 
   1804   __ bind(&parameters_loop);
   1805   __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
   1806   __ SmiScale(a0, a5, kPointerSizeLog2);
   1807   __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
   1808   __ Daddu(t2, a4, a0);
   1809   __ sd(t1, MemOperand(t2));
   1810   __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
   1811   __ Daddu(t2, a1, a0);
   1812   __ sd(a7, MemOperand(t2));
   1813   __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
   1814   __ bind(&parameters_test);
   1815   __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
   1816 
   1817   // Restore t1 = argument count (tagged).
   1818   __ ld(a5, FieldMemOperand(v0, kLengthOffset));
   1819 
   1820   __ bind(&skip_parameter_map);
   1821   // v0 = address of new object (tagged)
   1822   // a1 = address of backing store (tagged)
   1823   // a5 = argument count (tagged)
   1824   // a6 = mapped parameter count (tagged)
   1825   // t1 = scratch
   1826   // Copy arguments header and remaining slots (if there are any).
   1827   __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
   1828   __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
   1829   __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
   1830 
   1831   Label arguments_loop, arguments_test;
   1832   __ SmiScale(t2, a6, kPointerSizeLog2);
   1833   __ Dsubu(a3, a3, Operand(t2));
   1834   __ jmp(&arguments_test);
   1835 
   1836   __ bind(&arguments_loop);
   1837   __ Dsubu(a3, a3, Operand(kPointerSize));
   1838   __ ld(a4, MemOperand(a3, 0));
   1839   __ SmiScale(t2, a6, kPointerSizeLog2);
   1840   __ Daddu(t1, a1, Operand(t2));
   1841   __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
   1842   __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
   1843 
   1844   __ bind(&arguments_test);
   1845   __ Branch(&arguments_loop, lt, a6, Operand(a5));
   1846 
   1847   // Return.
   1848   __ Ret();
   1849 
   1850   // Do the runtime call to allocate the arguments object.
   1851   // a5 = argument count (tagged)
   1852   __ bind(&runtime);
   1853   __ Push(a1, a3, a5);
   1854   __ TailCallRuntime(Runtime::kNewSloppyArguments);
   1855 }
   1856 
   1857 
   1858 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   1859   // Return address is in ra.
   1860   Label slow;
   1861 
   1862   Register receiver = LoadDescriptor::ReceiverRegister();
   1863   Register key = LoadDescriptor::NameRegister();
   1864 
   1865   // Check that the key is an array index, that is Uint32.
   1866   __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
   1867   __ Branch(&slow, ne, t0, Operand(zero_reg));
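          // On mips64 a smi keeps its 32-bit payload in the upper word, so
          // (key & (kSmiTagMask | kSmiSignMask)) == 0 exactly when the key is
          // a non-negative smi, i.e. a valid uint32 array index.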
   1868 
   1869   // Everything is fine, call runtime.
   1870   __ Push(receiver, key);  // Receiver, key.
   1871 
   1872   // Perform tail call to the entry.
   1873   __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
   1874 
   1875   __ bind(&slow);
   1876   PropertyAccessCompiler::TailCallBuiltin(
   1877       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
   1878 }
   1879 
   1880 
   1881 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   1882   // a1 : function
   1883   // a2 : number of parameters (tagged)
   1884   // a3 : parameters pointer
   1885 
   1886   DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
   1887   DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
   1888   DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
   1889 
   1890   // Check if the calling frame is an arguments adaptor frame.
   1891   Label try_allocate, runtime;
   1892   __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1893   __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
   1894   __ Branch(&try_allocate, ne, a0,
   1895             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   1896 
   1897   // Patch the arguments.length and the parameters pointer.
   1898   __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1899   __ SmiScale(at, a2, kPointerSizeLog2);
   1900   __ Daddu(a4, a4, Operand(at));
   1901   __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
   1902 
   1903   // Try the new space allocation. Start out with computing the size
   1904   // of the arguments object and the elements array in words.
   1905   Label add_arguments_object;
   1906   __ bind(&try_allocate);
   1907   __ SmiUntag(t1, a2);
   1908   __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
   1909 
   1910   __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
   1911   __ bind(&add_arguments_object);
   1912   __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
   1913 
   1914   // Do the allocation of both objects in one go.
   1915   __ Allocate(t1, v0, a4, a5, &runtime,
   1916               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
   1917 
   1918   // Get the arguments boilerplate from the current native context.
   1919   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
   1920 
   1921   __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
   1922   __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
   1923   __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   1924   __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));
   1925 
   1926   // Get the length (smi tagged) and set that as an in-object property too.
   1927   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   1928   __ AssertSmi(a2);
   1929   __ sd(a2,
   1930         FieldMemOperand(v0, JSObject::kHeaderSize +
   1931                                 Heap::kArgumentsLengthIndex * kPointerSize));
   1932 
   1933   Label done;
   1934   __ Branch(&done, eq, a2, Operand(zero_reg));
   1935 
   1936   // Set up the elements pointer in the allocated arguments object and
   1937   // initialize the header in the elements fixed array.
   1938   __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
   1939   __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
   1940   __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
   1941   __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
   1942   __ sd(a2, FieldMemOperand(a4, FixedArray::kLengthOffset));
   1943   __ SmiUntag(a2);
   1944 
   1945   // Copy the fixed array slots.
   1946   Label loop;
   1947   // Set up a4 to point to the first array slot.
   1948   __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   1949   __ bind(&loop);
   1950   // Pre-decrement a3 with kPointerSize on each iteration.
   1951   // Pre-decrement in order to skip receiver.
   1952   __ Daddu(a3, a3, Operand(-kPointerSize));
   1953   __ ld(a5, MemOperand(a3));
   1954   // Post-increment a4 with kPointerSize on each iteration.
   1955   __ sd(a5, MemOperand(a4));
   1956   __ Daddu(a4, a4, Operand(kPointerSize));
   1957   __ Dsubu(a2, a2, Operand(1));
   1958   __ Branch(&loop, ne, a2, Operand(zero_reg));
   1959 
   1960   // Return.
   1961   __ bind(&done);
   1962   __ Ret();
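          // JS-level illustration (a sketch):
          //   function f() { 'use strict'; return arguments; }
          // f(1, 2) takes this fast path, producing a strict arguments object
          // of length 2 (note that only the length property is set up here).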
   1963 
   1964   // Do the runtime call to allocate the arguments object.
   1965   __ bind(&runtime);
   1966   __ Push(a1, a3, a2);
   1967   __ TailCallRuntime(Runtime::kNewStrictArguments);
   1968 }
   1969 
   1970 
   1971 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
   1972   // a2 : number of parameters (tagged)
   1973   // a3 : parameters pointer
   1974   // a4 : rest parameter index (tagged)
   1975   // Check if the calling frame is an arguments adaptor frame.
   1976 
   1977   Label runtime;
   1978   __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1979   __ ld(a5, MemOperand(a0, StandardFrameConstants::kContextOffset));
   1980   __ Branch(&runtime, ne, a5,
   1981             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   1982 
   1983   // Patch the arguments.length and the parameters pointer.
   1984   __ ld(a2, MemOperand(a0, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1985   __ SmiScale(at, a2, kPointerSizeLog2);
   1986 
   1987   __ Daddu(a3, a0, Operand(at));
   1988   __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
   1989 
   1990   // Do the runtime call to allocate the arguments object.
   1991   __ bind(&runtime);
   1992   __ Push(a2, a3, a4);
   1993   __ TailCallRuntime(Runtime::kNewRestParam);
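          // JS-level illustration (a sketch): for
          //   function f(a, ...rest) { return rest; }
          // the rest parameter index is 1, and the runtime builds |rest| from
          // the actual arguments at positions >= 1.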
   1994 }
   1995 
   1996 
   1997 void RegExpExecStub::Generate(MacroAssembler* masm) {
   1998   // Jump straight to the runtime if native RegExp is not selected at compile
   1999   // time, or if the regexp entry in generated code has been turned off by a
   2000   // runtime switch or at compilation.
   2001 #ifdef V8_INTERPRETED_REGEXP
   2002   __ TailCallRuntime(Runtime::kRegExpExec);
   2003 #else  // V8_INTERPRETED_REGEXP
   2004 
   2005   // Stack frame on entry.
   2006   //  sp[0]:  last_match_info (expected JSArray)
   2007   //  sp[8]:  previous index
   2008   //  sp[16]: subject string
   2009   //  sp[24]: JSRegExp object
   2010 
   2011   const int kLastMatchInfoOffset = 0 * kPointerSize;
   2012   const int kPreviousIndexOffset = 1 * kPointerSize;
   2013   const int kSubjectOffset = 2 * kPointerSize;
   2014   const int kJSRegExpOffset = 3 * kPointerSize;
   2015 
   2016   Label runtime;
   2017   // Allocation of registers for this function. These are in callee save
   2018   // registers and will be preserved by the call to the native RegExp code, as
   2019   // this code is called using the normal C calling convention. When calling
   2020   // directly from generated code the native RegExp code will not do a GC and
   2021   // therefore the content of these registers are safe to use after the call.
   2022   // MIPS - using s0..s2, since we are not using CEntry Stub.
   2023   Register subject = s0;
   2024   Register regexp_data = s1;
   2025   Register last_match_info_elements = s2;
   2026 
   2027   // Ensure that a RegExp stack is allocated.
   2028   ExternalReference address_of_regexp_stack_memory_address =
   2029       ExternalReference::address_of_regexp_stack_memory_address(
   2030           isolate());
   2031   ExternalReference address_of_regexp_stack_memory_size =
   2032       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   2033   __ li(a0, Operand(address_of_regexp_stack_memory_size));
   2034   __ ld(a0, MemOperand(a0, 0));
   2035   __ Branch(&runtime, eq, a0, Operand(zero_reg));
   2036 
   2037   // Check that the first argument is a JSRegExp object.
   2038   __ ld(a0, MemOperand(sp, kJSRegExpOffset));
   2039   STATIC_ASSERT(kSmiTag == 0);
   2040   __ JumpIfSmi(a0, &runtime);
   2041   __ GetObjectType(a0, a1, a1);
   2042   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
   2043 
   2044   // Check that the RegExp has been compiled (data contains a fixed array).
   2045   __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
   2046   if (FLAG_debug_code) {
   2047     __ SmiTst(regexp_data, a4);
   2048     __ Check(nz,
   2049              kUnexpectedTypeForRegExpDataFixedArrayExpected,
   2050              a4,
   2051              Operand(zero_reg));
   2052     __ GetObjectType(regexp_data, a0, a0);
   2053     __ Check(eq,
   2054              kUnexpectedTypeForRegExpDataFixedArrayExpected,
   2055              a0,
   2056              Operand(FIXED_ARRAY_TYPE));
   2057   }
   2058 
   2059   // regexp_data: RegExp data (FixedArray)
   2060   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   2061   __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   2062   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
   2063 
   2064   // regexp_data: RegExp data (FixedArray)
   2065   // Check that the number of captures fit in the static offsets vector buffer.
   2066   __ ld(a2,
   2067          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   2068   // Check (number_of_captures + 1) * 2 <= offsets vector size
   2069   // Or          number_of_captures * 2 <= offsets vector size - 2
   2070   // Or          number_of_captures     <= offsets vector size / 2 - 1
   2071   // Multiplying by 2 comes for free since a2 is smi-tagged.
   2072   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   2073   int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
   2074   __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
   2075 
   2076   // Reset offset for possibly sliced string.
   2077   __ mov(t0, zero_reg);
   2078   __ ld(subject, MemOperand(sp, kSubjectOffset));
   2079   __ JumpIfSmi(subject, &runtime);
   2080   __ mov(a3, subject);  // Make a copy of the original subject string.
   2081   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   2082   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   2083   // subject: subject string
   2084   // a3: subject string
   2085   // a0: subject string instance type
   2086   // regexp_data: RegExp data (FixedArray)
   2087   // Handle subject string according to its encoding and representation:
   2088   // (1) Sequential string?  If yes, go to (5).
   2089   // (2) Anything but sequential or cons?  If yes, go to (6).
   2090   // (3) Cons string.  If the string is flat, replace subject with first string.
   2091   //     Otherwise bailout.
   2092   // (4) Is subject external?  If yes, go to (7).
   2093   // (5) Sequential string.  Load regexp code according to encoding.
   2094   // (E) Carry on.
   2095   /// [...]
   2096 
   2097   // Deferred code at the end of the stub:
   2098   // (6) Not a long external string?  If yes, go to (8).
   2099   // (7) External string.  Make it, offset-wise, look like a sequential string.
   2100   //     Go to (5).
   2101   // (8) Short external string or not a string?  If yes, bail out to runtime.
   2102   // (9) Sliced string.  Replace subject with parent.  Go to (4).
   2103 
   2104   Label check_underlying;   // (4)
   2105   Label seq_string;         // (5)
   2106   Label not_seq_nor_cons;   // (6)
   2107   Label external_string;    // (7)
   2108   Label not_long_external;  // (8)
   2109 
   2110   // (1) Sequential string?  If yes, go to (5).
   2111   __ And(a1,
   2112          a0,
   2113          Operand(kIsNotStringMask |
   2114                  kStringRepresentationMask |
   2115                  kShortExternalStringMask));
   2116   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   2117   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).
   2118 
   2119   // (2) Anything but sequential or cons?  If yes, go to (6).
   2120   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   2121   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   2122   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   2123   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   2124   // Go to (6).
   2125   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
   2126 
   2127   // (3) Cons string.  Check that it's flat.
   2128   // Replace subject with first string and reload instance type.
   2129   __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
   2130   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
   2131   __ Branch(&runtime, ne, a0, Operand(a1));
   2132   __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   2133 
   2134   // (4) Is subject external?  If yes, go to (7).
   2135   __ bind(&check_underlying);
   2136   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   2137   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   2138   STATIC_ASSERT(kSeqStringTag == 0);
   2139   __ And(at, a0, Operand(kStringRepresentationMask));
   2140   // The underlying external string is never a short external string.
   2141   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
   2142   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
   2143   __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
   2144 
   2145   // (5) Sequential string.  Load regexp code according to encoding.
   2146   __ bind(&seq_string);
   2147   // subject: sequential subject string (or look-alike, external string)
   2148   // a3: original subject string
   2149   // Load previous index and check range before a3 is overwritten.  We have to
   2150   // use a3 instead of subject here because subject might have been only made
   2151   // to look like a sequential string when it actually is an external string.
   2152   __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
   2153   __ JumpIfNotSmi(a1, &runtime);
   2154   __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
   2155   __ Branch(&runtime, ls, a3, Operand(a1));
   2156   __ SmiUntag(a1);
   2157 
   2158   STATIC_ASSERT(kStringEncodingMask == 4);
   2159   STATIC_ASSERT(kOneByteStringTag == 4);
   2160   STATIC_ASSERT(kTwoByteStringTag == 0);
   2161   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
   2162   __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
   2163   __ dsra(a3, a0, 2);  // a3 is 1 for one_byte, 0 for UC16 (used below).
   2164   __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   2165   __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code in a5.
   2166 
   2167   // (E) Carry on.  String handling is done.
   2168   // t9: irregexp code
   2169   // Check that the irregexp code has been generated for the actual string
   2170   // encoding. If it has, the field contains a code object otherwise it contains
   2171   // a smi (code flushing support).
   2172   __ JumpIfSmi(t9, &runtime);
   2173 
   2174   // a1: previous index
   2175   // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
   2176   // t9: code
   2177   // subject: Subject string
   2178   // regexp_data: RegExp data (FixedArray)
   2179   // All checks done. Now push arguments for native regexp code.
   2180   __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
   2181                       1, a0, a2);
   2182 
   2183   // Isolates: note we add an additional parameter here (isolate pointer).
   2184   const int kRegExpExecuteArguments = 9;
   2185   const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
   2186   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
   2187 
   2188   // Stack pointer now points to cell where return address is to be written.
   2189   // Arguments are before that on the stack or in registers, meaning we
   2190   // treat the return address as argument 5. Thus every argument after that
   2191   // needs to be shifted back by 1. Since DirectCEntryStub will handle
   2192   // allocating space for the c argument slots, we don't need to calculate
   2193   // that into the argument positions on the stack. This is how the stack will
   2194   // look (sp meaning the value of sp at this moment):
   2195   // Abi n64:
   2196   //   [sp + 1] - Argument 9
   2197   //   [sp + 0] - saved ra
   2198   // Abi O32:
   2199   //   [sp + 5] - Argument 9
   2200   //   [sp + 4] - Argument 8
   2201   //   [sp + 3] - Argument 7
   2202   //   [sp + 2] - Argument 6
   2203   //   [sp + 1] - Argument 5
   2204   //   [sp + 0] - saved ra
   2205 
   2206   if (kMipsAbi == kN64) {
   2207     // Argument 9: Pass current isolate address.
   2208     __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
   2209     __ sd(a0, MemOperand(sp, 1 * kPointerSize));
   2210 
   2211     // Argument 8: Indicate that this is a direct call from JavaScript.
   2212     __ li(a7, Operand(1));
   2213 
   2214     // Argument 7: Start (high end) of backtracking stack memory area.
   2215     __ li(a0, Operand(address_of_regexp_stack_memory_address));
   2216     __ ld(a0, MemOperand(a0, 0));
   2217     __ li(a2, Operand(address_of_regexp_stack_memory_size));
   2218     __ ld(a2, MemOperand(a2, 0));
   2219     __ daddu(a6, a0, a2);
   2220 
   2221     // Argument 6: Set the number of capture registers to zero to force global
   2222     // regexps to behave as non-global. This does not affect non-global regexps.
   2223     __ mov(a5, zero_reg);
   2224 
   2225     // Argument 5: static offsets vector buffer.
   2226     __ li(a4, Operand(
   2227           ExternalReference::address_of_static_offsets_vector(isolate())));
   2228   } else {  // O32.
   2229     DCHECK(kMipsAbi == kO32);
   2230 
   2231     // Argument 9: Pass current isolate address.
   2232     // CFunctionArgumentOperand handles MIPS stack argument slots.
   2233     __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
   2234     __ sd(a0, MemOperand(sp, 5 * kPointerSize));
   2235 
   2236     // Argument 8: Indicate that this is a direct call from JavaScript.
   2237     __ li(a0, Operand(1));
   2238     __ sd(a0, MemOperand(sp, 4 * kPointerSize));
   2239 
   2240     // Argument 7: Start (high end) of backtracking stack memory area.
   2241     __ li(a0, Operand(address_of_regexp_stack_memory_address));
   2242     __ ld(a0, MemOperand(a0, 0));
   2243     __ li(a2, Operand(address_of_regexp_stack_memory_size));
   2244     __ ld(a2, MemOperand(a2, 0));
   2245     __ daddu(a0, a0, a2);
   2246     __ sd(a0, MemOperand(sp, 3 * kPointerSize));
   2247 
   2248     // Argument 6: Set the number of capture registers to zero to force global
   2249     // regexps to behave as non-global. This does not affect non-global regexps.
   2250     __ mov(a0, zero_reg);
   2251     __ sd(a0, MemOperand(sp, 2 * kPointerSize));
   2252 
   2253     // Argument 5: static offsets vector buffer.
   2254     __ li(a0, Operand(
   2255           ExternalReference::address_of_static_offsets_vector(isolate())));
   2256     __ sd(a0, MemOperand(sp, 1 * kPointerSize));
   2257   }
   2258 
   2259   // For arguments 4 and 3 get string length, calculate start of string data
   2260   // and calculate the shift of the index (0 for one_byte and 1 for two byte).
   2261   __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   2262   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   2263   // Load the length from the original subject string from the previous stack
   2264   // frame. Therefore we have to use fp, which points exactly to two pointer
   2265   // sizes below the previous sp. (Because creating a new stack frame pushes
   2266   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
   2267   __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   2268   // If slice offset is not 0, load the length from the original sliced string.
   2269   // Argument 4, a3: End of string data
   2270   // Argument 3, a2: Start of string data
   2271   // Prepare start and end index of the input.
   2272   __ dsllv(t1, t0, a3);
   2273   __ daddu(t0, t2, t1);
   2274   __ dsllv(t1, a1, a3);
   2275   __ daddu(a2, t0, t1);
   2276 
   2277   __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
   2278 
   2279   __ SmiUntag(t2);
   2280   __ dsllv(t1, t2, a3);
   2281   __ daddu(a3, t0, t1);
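          // In effect (a sketch): with char_shift in a3 (0 for one-byte, 1 for
          // two-byte strings),
          //   base = subject_data + (slice_offset   << char_shift)  // t0
          //   a2   = base         + (previous_index << char_shift)  // start
          //   a3   = base         + (subject_length << char_shift)  // end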
   2282   // Argument 2 (a1): Previous index.
   2283   // Already there
   2284 
   2285   // Argument 1 (a0): Subject string.
   2286   __ mov(a0, subject);
   2287 
   2288   // Locate the code entry and call it.
   2289   __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
   2290   DirectCEntryStub stub(isolate());
   2291   stub.GenerateCall(masm, t9);
   2292 
   2293   __ LeaveExitFrame(false, no_reg, true);
   2294 
   2295   // v0: result
   2296   // subject: subject string (callee saved)
   2297   // regexp_data: RegExp data (callee saved)
   2298   // last_match_info_elements: Last match info elements (callee saved)
   2299   // Check the result.
   2300   Label success;
   2301   __ Branch(&success, eq, v0, Operand(1));
   2302   // We expect exactly one result since we force the called regexp to behave
   2303   // as non-global.
   2304   Label failure;
   2305   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
   2306   // If not exception it can only be retry. Handle that in the runtime system.
   2307   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   2308   // Result must now be exception. If there is no pending exception already a
   2309   // stack overflow (on the backtrack stack) was detected in RegExp code but
   2310   // haven't created the exception yet. Handle that in the runtime system.
   2311   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   2312   __ li(a1, Operand(isolate()->factory()->the_hole_value()));
   2313   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   2314                                       isolate())));
   2315   __ ld(v0, MemOperand(a2, 0));
   2316   __ Branch(&runtime, eq, v0, Operand(a1));
   2317 
   2318   // For exception, throw the exception again.
   2319   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
   2320 
   2321   __ bind(&failure);
   2322   // For failure and exception return null.
   2323   __ li(v0, Operand(isolate()->factory()->null_value()));
   2324   __ DropAndRet(4);
   2325 
   2326   // Process the result from the native regexp code.
   2327   __ bind(&success);
   2328 
   2329   __ lw(a1, UntagSmiFieldMemOperand(
   2330       regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   2331   // Calculate number of capture registers (number_of_captures + 1) * 2.
   2332   __ Daddu(a1, a1, Operand(1));
   2333   __ dsll(a1, a1, 1);  // Multiply by 2.
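          // Example: /(\d+)-(\d+)/ has 2 captures, so (2 + 1) * 2 == 6 offsets
          // are produced: start/end of the whole match plus start/end of each
          // capture.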
   2334 
   2335   __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
   2336   __ JumpIfSmi(a0, &runtime);
   2337   __ GetObjectType(a0, a2, a2);
   2338   __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
   2339   // Check that the JSArray is in fast case.
   2340   __ ld(last_match_info_elements,
   2341         FieldMemOperand(a0, JSArray::kElementsOffset));
   2342   __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   2343   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
   2344   __ Branch(&runtime, ne, a0, Operand(at));
   2345   // Check that the last match info has space for the capture registers and the
   2346   // additional information.
   2347   __ ld(a0,
   2348         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   2349   __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
   2350 
   2351   __ SmiUntag(at, a0);
   2352   __ Branch(&runtime, gt, a2, Operand(at));
   2353 
   2354   // a1: number of capture registers
   2355   // subject: subject string
   2356   // Store the capture count.
   2357   __ SmiTag(a2, a1);  // To smi.
   2358   __ sd(a2, FieldMemOperand(last_match_info_elements,
   2359                              RegExpImpl::kLastCaptureCountOffset));
   2360   // Store last subject and last input.
   2361   __ sd(subject,
   2362          FieldMemOperand(last_match_info_elements,
   2363                          RegExpImpl::kLastSubjectOffset));
   2364   __ mov(a2, subject);
   2365   __ RecordWriteField(last_match_info_elements,
   2366                       RegExpImpl::kLastSubjectOffset,
   2367                       subject,
   2368                       a7,
   2369                       kRAHasNotBeenSaved,
   2370                       kDontSaveFPRegs);
   2371   __ mov(subject, a2);
   2372   __ sd(subject,
   2373          FieldMemOperand(last_match_info_elements,
   2374                          RegExpImpl::kLastInputOffset));
   2375   __ RecordWriteField(last_match_info_elements,
   2376                       RegExpImpl::kLastInputOffset,
   2377                       subject,
   2378                       a7,
   2379                       kRAHasNotBeenSaved,
   2380                       kDontSaveFPRegs);
   2381 
   2382   // Get the static offsets vector filled by the native regexp code.
   2383   ExternalReference address_of_static_offsets_vector =
   2384       ExternalReference::address_of_static_offsets_vector(isolate());
   2385   __ li(a2, Operand(address_of_static_offsets_vector));
   2386 
   2387   // a1: number of capture registers
   2388   // a2: offsets vector
   2389   Label next_capture, done;
   2390   // Capture register counter starts from number of capture registers and
   2391   // counts down until wrapping after zero.
   2392   __ Daddu(a0,
   2393          last_match_info_elements,
   2394          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
   2395   __ bind(&next_capture);
   2396   __ Dsubu(a1, a1, Operand(1));
   2397   __ Branch(&done, lt, a1, Operand(zero_reg));
   2398   // Read the value from the static offsets vector buffer.
   2399   __ lw(a3, MemOperand(a2, 0));
   2400   __ daddiu(a2, a2, kIntSize);
   2401   // Store the smi value in the last match info.
   2402   __ SmiTag(a3);
   2403   __ sd(a3, MemOperand(a0, 0));
   2404   __ Branch(&next_capture, USE_DELAY_SLOT);
   2405   __ daddiu(a0, a0, kPointerSize);  // In branch delay slot.
   2406 
   2407   __ bind(&done);
   2408 
   2409   // Return last match info.
   2410   __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
   2411   __ DropAndRet(4);
   2412 
   2413   // Do the runtime call to execute the regexp.
   2414   __ bind(&runtime);
   2415   __ TailCallRuntime(Runtime::kRegExpExec);
   2416 
   2417   // Deferred code for string handling.
   2418   // (6) Not a long external string?  If yes, go to (8).
   2419   __ bind(&not_seq_nor_cons);
   2420   // Go to (8).
   2421   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
   2422 
   2423   // (7) External string.  Make it, offset-wise, look like a sequential string.
   2424   __ bind(&external_string);
   2425   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   2426   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   2427   if (FLAG_debug_code) {
   2428     // Assert that we do not have a cons or slice (indirect strings) here.
   2429     // Sequential strings have already been ruled out.
   2430     __ And(at, a0, Operand(kIsIndirectStringMask));
   2431     __ Assert(eq,
   2432               kExternalStringExpectedButNotFound,
   2433               at,
   2434               Operand(zero_reg));
   2435   }
   2436   __ ld(subject,
   2437         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   2438   // Move the pointer so that offset-wise, it looks like a sequential string.
   2439   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   2440   __ Dsubu(subject,
   2441           subject,
   2442           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   2443   __ jmp(&seq_string);    // Go to (5).
   2444 
   2445   // (8) Short external string or not a string?  If yes, bail out to runtime.
   2446   __ bind(&not_long_external);
   2447   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   2448   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   2449   __ Branch(&runtime, ne, at, Operand(zero_reg));
   2450 
   2451   // (9) Sliced string.  Replace subject with parent.  Go to (4).
   2452   // Load offset into t0 and replace subject string with parent.
   2453   __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   2454   __ SmiUntag(t0);
   2455   __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   2456   __ jmp(&check_underlying);  // Go to (4).
   2457 #endif  // V8_INTERPRETED_REGEXP
   2458 }
   2459 
   2460 
   2461 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
   2462   // a0 : number of arguments to the construct function
   2463   // a2 : feedback vector
   2464   // a3 : slot in feedback vector (Smi)
   2465   // a1 : the function to call
   2466   FrameScope scope(masm, StackFrame::INTERNAL);
   2467   const RegList kSavedRegs = 1 << 4 |  // a0
   2468                              1 << 5 |  // a1
   2469                              1 << 6 |  // a2
   2470                              1 << 7;   // a3
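          // MIPS argument registers a0..a3 are the general registers $4..$7,
          // hence the bit positions 4..7 above.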
   2471 
   2472 
   2473   // Number-of-arguments register must be smi-tagged to call out.
   2474   __ SmiTag(a0);
   2475   __ MultiPush(kSavedRegs);
   2476 
   2477   __ CallStub(stub);
   2478 
   2479   __ MultiPop(kSavedRegs);
   2480   __ SmiUntag(a0);
   2481 }
   2482 
   2483 
   2484 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   2485   // Cache the called function in a feedback vector slot.  Cache states
   2486   // are uninitialized, monomorphic (indicated by a JSFunction), and
   2487   // megamorphic.
   2488   // a0 : number of arguments to the construct function
   2489   // a1 : the function to call
   2490   // a2 : feedback vector
   2491   // a3 : slot in feedback vector (Smi)
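          //
          // Rough outline: each slot behaves as a one-way state machine,
          //   uninitialized_symbol -> WeakCell(function) / AllocationSite
          //                        -> megamorphic_symbol,
          // and a cleared WeakCell re-opens the uninitialized case (see the
          // JumpIfSmi on the weak cell value below).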
   2492   Label initialize, done, miss, megamorphic, not_array_function;
   2493 
   2494   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
   2495             masm->isolate()->heap()->megamorphic_symbol());
   2496   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
   2497             masm->isolate()->heap()->uninitialized_symbol());
   2498 
   2499   // Load the cache state into a5.
   2500   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
   2501   __ Daddu(a5, a2, Operand(a5));
   2502   __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
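          // Note: a3 holds a Smi, and MIPS64 smis keep their payload in the
          // upper 32 bits, so the dsrl by (32 - kPointerSizeLog2) above both
          // untags the slot index and scales it to a byte offset in one step.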
   2503 
   2504   // A monomorphic cache hit or an already megamorphic state: invoke the
   2505   // function without changing the state.
   2506   // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
   2507   // this position in a symbol (see static asserts in type-feedback-vector.h).
   2508   Label check_allocation_site;
   2509   Register feedback_map = a6;
   2510   Register weak_value = t0;
   2511   __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
   2512   __ Branch(&done, eq, a1, Operand(weak_value));
   2513   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   2514   __ Branch(&done, eq, a5, Operand(at));
   2515   __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
   2516   __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
   2517   __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
   2518 
   2519   // If the weak cell is cleared, we have a new chance to become monomorphic.
   2520   __ JumpIfSmi(weak_value, &initialize);
   2521   __ jmp(&megamorphic);
   2522 
   2523   __ bind(&check_allocation_site);
   2524   // If we came here, we need to see if we are the array function.
   2525   // If we didn't have a matching function, and we didn't find the
   2526   // megamorphic sentinel, then the slot holds either some other
   2527   // function or an AllocationSite.
   2528   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   2529   __ Branch(&miss, ne, feedback_map, Operand(at));
   2530 
   2531   // Make sure the function is the Array() function.
   2532   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
   2533   __ Branch(&megamorphic, ne, a1, Operand(a5));
   2534   __ jmp(&done);
   2535 
   2536   __ bind(&miss);
   2537 
   2538   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
   2539   // megamorphic.
   2540   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
   2541   __ Branch(&initialize, eq, a5, Operand(at));
   2542   // MegamorphicSentinel is an immortal immovable object (a symbol), so no
   2543   // write barrier is needed.
   2544   __ bind(&megamorphic);
   2545   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
   2546   __ Daddu(a5, a2, Operand(a5));
   2547   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   2548   __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
   2549   __ jmp(&done);
   2550 
   2551   // An uninitialized cache is patched with the function.
   2552   __ bind(&initialize);
   2553   // Make sure the function is the Array() function.
   2554   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
   2555   __ Branch(&not_array_function, ne, a1, Operand(a5));
   2556 
   2557   // The target function is the Array constructor.
   2558   // Create an AllocationSite if we don't already have one and store it in
   2559   // the slot.
   2560   CreateAllocationSiteStub create_stub(masm->isolate());
   2561   CallStubInRecordCallTarget(masm, &create_stub);
   2562   __ Branch(&done);
   2563 
   2564   __ bind(&not_array_function);
   2565 
   2566   CreateWeakCellStub weak_cell_stub(masm->isolate());
   2567   CallStubInRecordCallTarget(masm, &weak_cell_stub);
   2568   __ bind(&done);
   2569 }
   2570 
   2571 
   2572 void CallConstructStub::Generate(MacroAssembler* masm) {
   2573   // a0 : number of arguments
   2574   // a1 : the function to call
   2575   // a2 : feedback vector
   2576   // a3 : slot in feedback vector (Smi, for RecordCallTarget)
   2577 
   2578   Label non_function;
   2579   // Check that the function is not a smi.
   2580   __ JumpIfSmi(a1, &non_function);
   2581   // Check that the function is a JSFunction.
   2582   __ GetObjectType(a1, a5, a5);
   2583   __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
   2584 
   2585   GenerateRecordCallTarget(masm);
   2586 
   2587   __ dsrl(at, a3, 32 - kPointerSizeLog2);
   2588   __ Daddu(a5, a2, at);
   2589   Label feedback_register_initialized;
   2590   // Put the AllocationSite from the feedback vector into a2, or undefined.
   2591   __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
   2592   __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
   2593   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   2594   __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
   2595   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
   2596   __ bind(&feedback_register_initialized);
   2597 
   2598   __ AssertUndefinedOrAllocationSite(a2, a5);
   2599 
   2600   // Pass function as new target.
   2601   __ mov(a3, a1);
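          // Register contract from here on: a0 -- argument count,
          // a1 -- constructor, a2 -- AllocationSite or undefined,
          // a3 -- new target (same as the constructor here).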
   2602 
   2603   // Tail call to the function-specific construct stub (still in the caller
   2604   // context at this point).
   2605   __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   2606   __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
   2607   __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
   2608   __ Jump(at);
   2609 
   2610   __ bind(&non_function);
   2611   __ mov(a3, a1);
   2612   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   2613 }
   2614 
   2615 
   2616 // StringCharCodeAtGenerator.
   2617 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   2618   DCHECK(!a4.is(index_));
   2619   DCHECK(!a4.is(result_));
   2620   DCHECK(!a4.is(object_));
   2621 
   2622   // If the receiver is a smi, trigger the non-string case.
   2623   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
   2624     __ JumpIfSmi(object_, receiver_not_string_);
   2625 
   2626     // Fetch the instance type of the receiver into result register.
   2627     __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2628     __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2629     // If the receiver is not a string, trigger the non-string case.
   2630     __ And(a4, result_, Operand(kIsNotStringMask));
   2631     __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
   2632   }
   2633 
   2634   // If the index is non-smi, trigger the non-smi case.
   2635   __ JumpIfNotSmi(index_, &index_not_smi_);
   2636 
   2637   __ bind(&got_smi_index_);
   2638 
   2639   // Check for index out of range.
   2640   __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
   2641   __ Branch(index_out_of_range_, ls, a4, Operand(index_));
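          // The unsigned 'ls' comparison also rejects negative indices: a
          // negative smi has its sign bit set and therefore compares as a
          // huge unsigned value against the length.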
   2642 
   2643   __ SmiUntag(index_);
   2644 
   2645   StringCharLoadGenerator::Generate(masm,
   2646                                     object_,
   2647                                     index_,
   2648                                     result_,
   2649                                     &call_runtime_);
   2650 
   2651   __ SmiTag(result_);
   2652   __ bind(&exit_);
   2653 }
   2654 
   2655 
   2656 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   2657   // a1 - function
   2658   // a3 - slot id
   2659   // a2 - vector
   2660   // a4 - allocation site (loaded from vector[slot])
   2661   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
   2662   __ Branch(miss, ne, a1, Operand(at));
   2663 
   2664   __ li(a0, Operand(arg_count()));
   2665 
   2666   // Increment the call count for monomorphic function calls.
   2667   __ dsrl(t0, a3, 32 - kPointerSizeLog2);
   2668   __ Daddu(a3, a2, Operand(t0));
   2669   __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
   2670   __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   2671   __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
   2672 
   2673   __ mov(a2, a4);
   2674   __ mov(a3, a1);
   2675   ArrayConstructorStub stub(masm->isolate(), arg_count());
   2676   __ TailCallStub(&stub);
   2677 }
   2678 
   2679 
   2680 void CallICStub::Generate(MacroAssembler* masm) {
   2681   // a1 - function
   2682   // a3 - slot id (Smi)
   2683   // a2 - vector
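          // Dispatch outline: a monomorphic WeakCell hit tail-calls the
          // CallFunction builtin; the megamorphic sentinel tail-calls the
          // generic Call builtin; an uninitialized slot gets patched with a
          // new WeakCell; everything else funnels into the miss handler.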
   2684   Label extra_checks_or_miss, call, call_function;
   2685   int argc = arg_count();
   2686   ParameterCount actual(argc);
   2687 
   2688   // The checks. First, does a1 match the recorded monomorphic target?
   2689   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
   2690   __ Daddu(a4, a2, Operand(a4));
   2691   __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
   2692 
   2693   // We don't know that we have a weak cell. We might have a private symbol
   2694   // or an AllocationSite, but the memory is safe to examine.
   2695   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
   2696   // FixedArray.
   2697   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
   2698   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
   2699   // computed, meaning that it can't appear to be a pointer. If the low bit is
   2700   // 0, then hash is computed, but the 0 bit prevents the field from appearing
   2701   // to be a pointer.
   2702   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
   2703   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
   2704                     WeakCell::kValueOffset &&
   2705                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
   2706 
   2707   __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
   2708   __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
   2709 
   2710   // The compare above could have been a SMI/SMI comparison. Guard against this
   2711   // convincing us that we have a monomorphic JSFunction.
   2712   __ JumpIfSmi(a1, &extra_checks_or_miss);
   2713 
   2714   // Increment the call count for monomorphic function calls.
   2715   __ dsrl(t0, a3, 32 - kPointerSizeLog2);
   2716   __ Daddu(a3, a2, Operand(t0));
   2717   __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
   2718   __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   2719   __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
   2720 
   2721   __ bind(&call_function);
   2722   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
   2723           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
   2724           USE_DELAY_SLOT);
   2725   __ li(a0, Operand(argc));  // In delay slot.
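          // On MIPS the instruction in a branch delay slot executes before
          // control transfers, so the argument count is in place on arrival.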
   2726 
   2727   __ bind(&extra_checks_or_miss);
   2728   Label uninitialized, miss, not_allocation_site;
   2729 
   2730   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   2731   __ Branch(&call, eq, a4, Operand(at));
   2732 
   2733   // Verify that a4 contains an AllocationSite.
   2734   __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
   2735   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   2736   __ Branch(&not_allocation_site, ne, a5, Operand(at));
   2737 
   2738   HandleArrayCase(masm, &miss);
   2739 
   2740   __ bind(&not_allocation_site);
   2741 
   2742   // The following cases attempt to handle MISS cases without going to the
   2743   // runtime.
   2744   if (FLAG_trace_ic) {
   2745     __ Branch(&miss);
   2746   }
   2747 
   2748   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
   2749   __ Branch(&uninitialized, eq, a4, Operand(at));
   2750 
   2751   // We are going megamorphic. If the feedback is a JSFunction, it is fine
   2752   // to handle it here. More complex cases are dealt with in the runtime.
   2753   __ AssertNotSmi(a4);
   2754   __ GetObjectType(a4, a5, a5);
   2755   __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
   2756   __ dsrl(a4, a3, 32 - kPointerSizeLog2);
   2757   __ Daddu(a4, a2, Operand(a4));
   2758   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   2759   __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
   2760 
   2761   __ bind(&call);
   2762   __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
   2763           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
   2764           USE_DELAY_SLOT);
   2765   __ li(a0, Operand(argc));  // In delay slot.
   2766 
   2767   __ bind(&uninitialized);
   2768 
   2769   // We are going monomorphic, provided we actually have a JSFunction.
   2770   __ JumpIfSmi(a1, &miss);
   2771 
   2772   // Goto miss case if we do not have a function.
   2773   __ GetObjectType(a1, a4, a4);
   2774   __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
   2775 
   2776   // Make sure the function is not the Array() function, which requires special
   2777   // behavior on MISS.
   2778   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
   2779   __ Branch(&miss, eq, a1, Operand(a4));
   2780 
   2781   // Make sure the function belongs to the same native context.
   2782   __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
   2783   __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
   2784   __ ld(t1, NativeContextMemOperand());
   2785   __ Branch(&miss, ne, t0, Operand(t1));
   2786 
   2787   // Initialize the call counter.
   2788   __ dsrl(at, a3, 32 - kPointerSizeLog2);
   2789   __ Daddu(at, a2, Operand(at));
   2790   __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   2791   __ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   2792 
   2793   // Store the function. Use a stub since we need a frame for allocation.
   2794   // a2 - vector
   2795   // a3 - slot
   2796   // a1 - function
   2797   {
   2798     FrameScope scope(masm, StackFrame::INTERNAL);
   2799     CreateWeakCellStub create_stub(masm->isolate());
   2800     __ Push(a1);
   2801     __ CallStub(&create_stub);
   2802     __ Pop(a1);
   2803   }
   2804 
   2805   __ Branch(&call_function);
   2806 
   2807   // We are here because tracing is on or we encountered a MISS case we can't
   2808   // handle here.
   2809   __ bind(&miss);
   2810   GenerateMiss(masm);
   2811 
   2812   __ Branch(&call);
   2813 }
   2814 
   2815 
   2816 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   2817   FrameScope scope(masm, StackFrame::INTERNAL);
   2818 
   2819   // Push the function and feedback info.
   2820   __ Push(a1, a2, a3);
   2821 
   2822   // Call the entry.
   2823   __ CallRuntime(Runtime::kCallIC_Miss);
   2824 
   2825   // Move result to a1 and exit the internal frame.
   2826   __ mov(a1, v0);
   2827 }
   2828 
   2829 
   2830 void StringCharCodeAtGenerator::GenerateSlow(
   2831     MacroAssembler* masm, EmbedMode embed_mode,
   2832     const RuntimeCallHelper& call_helper) {
   2833   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   2834 
   2835   // Index is not a smi.
   2836   __ bind(&index_not_smi_);
   2837   // If index is a heap number, try converting it to an integer.
   2838   __ CheckMap(index_,
   2839               result_,
   2840               Heap::kHeapNumberMapRootIndex,
   2841               index_not_number_,
   2842               DONT_DO_SMI_CHECK);
   2843   call_helper.BeforeCall(masm);
   2844   // Consumed by runtime conversion function:
   2845   if (embed_mode == PART_OF_IC_HANDLER) {
   2846     __ Push(LoadWithVectorDescriptor::VectorRegister(),
   2847             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   2848   } else {
   2849     __ Push(object_, index_);
   2850   }
   2851   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
   2852     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
   2853   } else {
   2854     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
   2855     // NumberToSmi discards numbers that are not exact integers.
   2856     __ CallRuntime(Runtime::kNumberToSmi);
   2857   }
   2858 
   2859   // Save the conversion result before the pop instructions below
   2860   // have a chance to overwrite it.
   2861 
   2862   __ Move(index_, v0);
   2863   if (embed_mode == PART_OF_IC_HANDLER) {
   2864     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
   2865            LoadWithVectorDescriptor::SlotRegister(), object_);
   2866   } else {
   2867     __ pop(object_);
   2868   }
   2869   // Reload the instance type.
   2870   __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2871   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2872   call_helper.AfterCall(masm);
   2873   // If index is still not a smi, it must be out of range.
   2874   __ JumpIfNotSmi(index_, index_out_of_range_);
   2875   // Otherwise, return to the fast path.
   2876   __ Branch(&got_smi_index_);
   2877 
   2878   // Call runtime. We get here when the receiver is a string and the
   2879   // index is a number, but getting the actual character is too complex
   2880   // (e.g., when the string needs to be flattened).
   2881   __ bind(&call_runtime_);
   2882   call_helper.BeforeCall(masm);
   2883   __ SmiTag(index_);
   2884   __ Push(object_, index_);
   2885   __ CallRuntime(Runtime::kStringCharCodeAtRT);
   2886 
   2887   __ Move(result_, v0);
   2888 
   2889   call_helper.AfterCall(masm);
   2890   __ jmp(&exit_);
   2891 
   2892   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   2893 }
   2894 
   2895 
   2896 // -------------------------------------------------------------------------
   2897 // StringCharFromCodeGenerator
   2898 
   2899 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   2900   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   2901   __ JumpIfNotSmi(code_, &slow_case_);
   2902   __ Branch(&slow_case_, hi, code_,
   2903             Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
   2904 
   2905   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   2906   // The code register now contains a smi-tagged one-byte char code.
   2907   __ SmiScale(at, code_, kPointerSizeLog2);
   2908   __ Daddu(result_, result_, at);
   2909   __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   2910   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   2911   __ Branch(&slow_case_, eq, result_, Operand(at));
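          // An undefined cache entry means the single-character string for
          // this code has not been materialized yet, so defer to the runtime.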
   2912   __ bind(&exit_);
   2913 }
   2914 
   2915 
   2916 void StringCharFromCodeGenerator::GenerateSlow(
   2917     MacroAssembler* masm,
   2918     const RuntimeCallHelper& call_helper) {
   2919   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
   2920 
   2921   __ bind(&slow_case_);
   2922   call_helper.BeforeCall(masm);
   2923   __ push(code_);
   2924   __ CallRuntime(Runtime::kStringCharFromCode);
   2925   __ Move(result_, v0);
   2926 
   2927   call_helper.AfterCall(masm);
   2928   __ Branch(&exit_);
   2929 
   2930   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
   2931 }
   2932 
   2933 
   2934 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
   2935 
   2936 
   2937 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
   2938                                           Register dest,
   2939                                           Register src,
   2940                                           Register count,
   2941                                           Register scratch,
   2942                                           String::Encoding encoding) {
   2943   if (FLAG_debug_code) {
   2944     // Check that destination is word aligned.
   2945     __ And(scratch, dest, Operand(kPointerAlignmentMask));
   2946     __ Check(eq,
   2947              kDestinationOfCopyNotAligned,
   2948              scratch,
   2949              Operand(zero_reg));
   2950   }
   2951 
   2952   // Assumes word reads and writes are little endian.
   2953   // Nothing to do for zero characters.
   2954   Label done;
   2955 
   2956   if (encoding == String::TWO_BYTE_ENCODING) {
   2957     __ Daddu(count, count, count);
   2958   }
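          // The copy loop below moves one byte per iteration, so for two-byte
          // strings the character count was doubled above into a byte count.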
   2959 
   2960   Register limit = count;  // Read until dest equals this.
   2961   __ Daddu(limit, dest, Operand(count));
   2962 
   2963   Label loop_entry, loop;
   2964   // Copy bytes from src to dest until dest hits limit.
   2965   __ Branch(&loop_entry);
   2966   __ bind(&loop);
   2967   __ lbu(scratch, MemOperand(src));
   2968   __ daddiu(src, src, 1);
   2969   __ sb(scratch, MemOperand(dest));
   2970   __ daddiu(dest, dest, 1);
   2971   __ bind(&loop_entry);
   2972   __ Branch(&loop, lt, dest, Operand(limit));
   2973 
   2974   __ bind(&done);
   2975 }
   2976 
   2977 
   2978 void SubStringStub::Generate(MacroAssembler* masm) {
   2979   Label runtime;
   2980   // Stack frame on entry.
   2981   //  ra: return address
   2982   //  sp[0]: to
   2983   //  sp[4]: from
   2984   //  sp[8]: string
   2985 
   2986   // This stub is called from the native-call %_SubString(...), so
   2987   // nothing can be assumed about the arguments. It is tested that:
   2988   //  "string" is a sequential string,
   2989   //  both "from" and "to" are smis, and
   2990   //  0 <= from <= to <= string.length.
   2991   // If any of these assumptions fail, we call the runtime system.
   2992 
   2993   const int kToOffset = 0 * kPointerSize;
   2994   const int kFromOffset = 1 * kPointerSize;
   2995   const int kStringOffset = 2 * kPointerSize;
   2996 
   2997   __ ld(a2, MemOperand(sp, kToOffset));
   2998   __ ld(a3, MemOperand(sp, kFromOffset));
   2999 
   3000   STATIC_ASSERT(kSmiTag == 0);
   3001 
   3002   // Utilize delay slots: SmiUntag doesn't emit a jump, and everything else
   3003   // is safe in this case.
   3004   __ JumpIfNotSmi(a2, &runtime);
   3005   __ JumpIfNotSmi(a3, &runtime);
   3006 
   3007   __ SmiUntag(a2, a2);
   3008   __ SmiUntag(a3, a3);
   3009   // Both a2 and a3 are now untagged integers.
   3010   __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
   3011 
   3012   __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
   3013   __ Dsubu(a2, a2, a3);
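          // a2 now holds the substring length (to - from); a3 still holds the
          // untagged start index.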
   3014 
   3015   // Make sure first argument is a string.
   3016   __ ld(v0, MemOperand(sp, kStringOffset));
   3017   __ JumpIfSmi(v0, &runtime);
   3018   __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   3019   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
   3020   __ And(a4, a1, Operand(kIsNotStringMask));
   3021 
   3022   __ Branch(&runtime, ne, a4, Operand(zero_reg));
   3023 
   3024   Label single_char;
   3025   __ Branch(&single_char, eq, a2, Operand(1));
   3026 
   3027   // Short-cut for the case of a trivial substring.
   3028   Label return_v0;
   3029   // v0: original string
   3030   // a2: result string length
   3031   __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
   3032   __ SmiUntag(a4);
   3033   // Return original string.
   3034   __ Branch(&return_v0, eq, a2, Operand(a4));
   3035   // Longer than original string's length or negative: unsafe arguments.
   3036   __ Branch(&runtime, hi, a2, Operand(a4));
   3037   // Shorter than original string's length: an actual substring.
   3038 
   3039   // Deal with different string types: update the index if necessary
   3040   // and put the underlying string into a5.
   3041   // v0: original string
   3042   // a1: instance type
   3043   // a2: length
   3044   // a3: from index (untagged)
   3045   Label underlying_unpacked, sliced_string, seq_or_external_string;
   3046   // If the string is not indirect, it can only be sequential or external.
   3047   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
   3048   STATIC_ASSERT(kIsIndirectStringMask != 0);
   3049   __ And(a4, a1, Operand(kIsIndirectStringMask));
   3050   __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
   3051   // a4 is used as a scratch register and can be overwritten in either case.
   3052   __ And(a4, a1, Operand(kSlicedNotConsMask));
   3053   __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
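          // Note: the And above sits in the first branch's delay slot, so it
          // also executes on the way to seq_or_external_string, where its
          // result is simply ignored.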
   3054   // Cons string.  Check whether it is flat, then fetch first part.
   3055   __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
   3056   __ LoadRoot(a4, Heap::kempty_stringRootIndex);
   3057   __ Branch(&runtime, ne, a5, Operand(a4));
   3058   __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
   3059   // Update instance type.
   3060   __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
   3061   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
   3062   __ jmp(&underlying_unpacked);
   3063 
   3064   __ bind(&sliced_string);
   3065   // Sliced string.  Fetch parent and correct start index by offset.
   3066   __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
   3067   __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
   3068   __ SmiUntag(a4);  // Add offset to index.
   3069   __ Daddu(a3, a3, a4);
   3070   // Update instance type.
   3071   __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
   3072   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
   3073   __ jmp(&underlying_unpacked);
   3074 
   3075   __ bind(&seq_or_external_string);
   3076   // Sequential or external string.  Just move string to the expected register.
   3077   __ mov(a5, v0);
   3078 
   3079   __ bind(&underlying_unpacked);
   3080 
   3081   if (FLAG_string_slices) {
   3082     Label copy_routine;
   3083     // a5: underlying subject string
   3084     // a1: instance type of underlying subject string
   3085     // a2: length
   3086     // a3: adjusted start index (untagged)
   3087     // Short slice.  Copy instead of slicing.
   3088     __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
   3089     // Allocate new sliced string.  At this point we do not reload the instance
   3090     // type including the string encoding because we simply rely on the info
   3091     // provided by the original string.  It does not matter if the original
   3092     // string's encoding is wrong because we always have to recheck encoding of
   3093   // the newly created string's parent anyway due to externalized strings.
   3094     Label two_byte_slice, set_slice_header;
   3095     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   3096     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   3097     __ And(a4, a1, Operand(kStringEncodingMask));
   3098     __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
   3099     __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
   3100     __ jmp(&set_slice_header);
   3101     __ bind(&two_byte_slice);
   3102     __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
   3103     __ bind(&set_slice_header);
   3104     __ SmiTag(a3);
   3105     __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
   3106     __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
   3107     __ jmp(&return_v0);
   3108 
   3109     __ bind(&copy_routine);
   3110   }
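          // Design note: substrings shorter than SlicedString::kMinLength
          // take the copy path below because, for a handful of characters,
          // copying is cheaper than allocating a slice and paying its extra
          // indirection on every subsequent access.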
   3111 
   3112   // a5: underlying subject string
   3113   // a1: instance type of underlying subject string
   3114   // a2: length
   3115   // a3: adjusted start index (untagged)
   3116   Label two_byte_sequential, sequential_string, allocate_result;
   3117   STATIC_ASSERT(kExternalStringTag != 0);
   3118   STATIC_ASSERT(kSeqStringTag == 0);
   3119   __ And(a4, a1, Operand(kExternalStringTag));
   3120   __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
   3121 
   3122   // Handle external string.
   3123   // Rule out short external strings.
   3124   STATIC_ASSERT(kShortExternalStringTag != 0);
   3125   __ And(a4, a1, Operand(kShortExternalStringTag));
   3126   __ Branch(&runtime, ne, a4, Operand(zero_reg));
   3127   __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
   3128   // a5 already points to the first character of the underlying string.
   3129   __ jmp(&allocate_result);
   3130 
   3131   __ bind(&sequential_string);
   3132   // Locate first character of underlying subject string.
   3133   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   3134   __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   3135 
   3136   __ bind(&allocate_result);
   3137   // Allocate the result (one- or two-byte depending on the encoding).
   3138   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   3139   __ And(a4, a1, Operand(kStringEncodingMask));
   3140   __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
   3141 
   3142   // Allocate and copy the resulting one_byte string.
   3143   __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
   3144 
   3145   // Locate first character of substring to copy.
   3146   __ Daddu(a5, a5, a3);
   3147 
   3148   // Locate first character of result.
   3149   __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   3150 
   3151   // v0: result string
   3152   // a1: first character of result string
   3153   // a2: result string length
   3154   // a5: first character of substring to copy
   3155   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   3156   StringHelper::GenerateCopyCharacters(
   3157       masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
   3158   __ jmp(&return_v0);
   3159 
   3160   // Allocate and copy the resulting two-byte string.
   3161   __ bind(&two_byte_sequential);
   3162   __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
   3163 
   3164   // Locate first character of substring to copy.
   3165   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   3166   __ dsll(a4, a3, 1);
   3167   __ Daddu(a5, a5, a4);
   3168   // Locate first character of result.
   3169   __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   3170 
   3171   // v0: result string.
   3172   // a1: first character of result.
   3173   // a2: result length.
   3174   // a5: first character of substring to copy.
   3175   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   3176   StringHelper::GenerateCopyCharacters(
   3177       masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
   3178 
   3179   __ bind(&return_v0);
   3180   Counters* counters = isolate()->counters();
   3181   __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
   3182   __ DropAndRet(3);
   3183 
   3184   // Just jump to runtime to create the sub string.
   3185   __ bind(&runtime);
   3186   __ TailCallRuntime(Runtime::kSubString);
   3187 
   3188   __ bind(&single_char);
   3189   // v0: original string
   3190   // a1: instance type
   3191   // a2: length
   3192   // a3: from index (untagged)
   3193   __ SmiTag(a3);
   3194   StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
   3195                                   STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
   3196   generator.GenerateFast(masm);
   3197   __ DropAndRet(3);
   3198   generator.SkipSlow(masm, &runtime);
   3199 }
   3200 
   3201 
   3202 void ToNumberStub::Generate(MacroAssembler* masm) {
   3203   // The ToNumber stub takes one argument in a0.
   3204   Label not_smi;
   3205   __ JumpIfNotSmi(a0, &not_smi);
   3206   __ Ret(USE_DELAY_SLOT);
   3207   __ mov(v0, a0);
   3208   __ bind(&not_smi);
   3209 
   3210   Label not_heap_number;
   3211   __ ld(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
   3212   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
   3213   // a0: object
   3214   // a1: instance type.
   3215   __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
   3216   __ Ret(USE_DELAY_SLOT);
   3217   __ mov(v0, a0);
   3218   __ bind(&not_heap_number);
   3219 
   3220   Label not_string, slow_string;
   3221   __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
   3222   // Check if string has a cached array index.
   3223   __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
   3224   __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
   3225   __ Branch(&slow_string, ne, at, Operand(zero_reg));
   3226   __ IndexFromHash(a2, a0);
   3227   __ Ret(USE_DELAY_SLOT);
   3228   __ mov(v0, a0);
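          // Strings that are valid array indices cache that index in their
          // hash field; IndexFromHash extracted it above and returned it as a
          // smi, skipping a full string-to-number conversion.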
   3229   __ bind(&slow_string);
   3230   __ push(a0);  // Push argument.
   3231   __ TailCallRuntime(Runtime::kStringToNumber);
   3232   __ bind(&not_string);
   3233 
   3234   Label not_oddball;
   3235   __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
   3236   __ Ret(USE_DELAY_SLOT);
   3237   __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
   3238   __ bind(&not_oddball);
   3239 
   3240   __ push(a0);  // Push argument.
   3241   __ TailCallRuntime(Runtime::kToNumber);
   3242 }
   3243 
   3244 
   3245 void ToLengthStub::Generate(MacroAssembler* masm) {
   3246   // The ToLength stub takes one argument in a0.
   3247   Label not_smi, positive_smi;
   3248   __ JumpIfNotSmi(a0, &not_smi);
   3249   STATIC_ASSERT(kSmiTag == 0);
   3250   __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
   3251   __ mov(a0, zero_reg);
   3252   __ bind(&positive_smi);
   3253   __ Ret(USE_DELAY_SLOT);
   3254   __ mov(v0, a0);
   3255   __ bind(&not_smi);
   3256 
   3257   __ push(a0);  // Push argument.
   3258   __ TailCallRuntime(Runtime::kToLength);
   3259 }
   3260 
   3261 
   3262 void ToStringStub::Generate(MacroAssembler* masm) {
   3263   // The ToString stub takes one argument in a0.
   3264   Label is_number;
   3265   __ JumpIfSmi(a0, &is_number);
   3266 
   3267   Label not_string;
   3268   __ GetObjectType(a0, a1, a1);
   3269   // a0: receiver
   3270   // a1: receiver instance type
   3271   __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
   3272   __ Ret(USE_DELAY_SLOT);
   3273   __ mov(v0, a0);
   3274   __ bind(&not_string);
   3275 
   3276   Label not_heap_number;
   3277   __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
   3278   __ bind(&is_number);
   3279   NumberToStringStub stub(isolate());
   3280   __ TailCallStub(&stub);
   3281   __ bind(&not_heap_number);
   3282 
   3283   Label not_oddball;
   3284   __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
   3285   __ Ret(USE_DELAY_SLOT);
   3286   __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
   3287   __ bind(&not_oddball);
   3288 
   3289   __ push(a0);  // Push argument.
   3290   __ TailCallRuntime(Runtime::kToString);
   3291 }
   3292 
   3293 
   3294 void StringHelper::GenerateFlatOneByteStringEquals(
   3295     MacroAssembler* masm, Register left, Register right, Register scratch1,
   3296     Register scratch2, Register scratch3) {
   3297   Register length = scratch1;
   3298 
   3299   // Compare lengths.
   3300   Label strings_not_equal, check_zero_length;
   3301   __ ld(length, FieldMemOperand(left, String::kLengthOffset));
   3302   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
   3303   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
   3304   __ bind(&strings_not_equal);
   3305   // Cannot put li in the delay slot; it expands to multiple instructions.
   3306   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
   3307   __ Ret();
   3308 
   3309   // Check if the length is zero.
   3310   Label compare_chars;
   3311   __ bind(&check_zero_length);
   3312   STATIC_ASSERT(kSmiTag == 0);
   3313   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
   3314   DCHECK(is_int16(reinterpret_cast<intptr_t>(Smi::FromInt(EQUAL))));
   3315   __ Ret(USE_DELAY_SLOT);
   3316   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   3317 
   3318   // Compare characters.
   3319   __ bind(&compare_chars);
   3320 
   3321   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
   3322                                   v0, &strings_not_equal);
   3323 
   3324   // Characters are equal.
   3325   __ Ret(USE_DELAY_SLOT);
   3326   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   3327 }
   3328 
   3329 
   3330 void StringHelper::GenerateCompareFlatOneByteStrings(
   3331     MacroAssembler* masm, Register left, Register right, Register scratch1,
   3332     Register scratch2, Register scratch3, Register scratch4) {
   3333   Label result_not_equal, compare_lengths;
   3334   // Find minimum length and length difference.
   3335   __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
   3336   __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
   3337   __ Dsubu(scratch3, scratch1, Operand(scratch2));
   3338   Register length_delta = scratch3;
   3339   __ slt(scratch4, scratch2, scratch1);
   3340   __ Movn(scratch1, scratch2, scratch4);
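          // slt/Movn idiom: scratch4 = (right length < left length), and Movn
          // overwrites scratch1 with scratch2 when scratch4 is non-zero, so
          // scratch1 ends up holding min(left length, right length).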
   3341   Register min_length = scratch1;
   3342   STATIC_ASSERT(kSmiTag == 0);
   3343   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
   3344 
   3345   // Compare loop.
   3346   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
   3347                                   scratch4, v0, &result_not_equal);
   3348 
   3349   // Compare lengths - strings up to min-length are equal.
   3350   __ bind(&compare_lengths);
   3351   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   3352   // Use length_delta as result if it's zero.
   3353   __ mov(scratch2, length_delta);
   3354   __ mov(scratch4, zero_reg);
   3355   __ mov(v0, zero_reg);
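          // Both paths meet below with a pair to compare: on fall-through it
          // is (length_delta, 0); on a loop bailout it is the two differing
          // characters left in scratch2 and scratch4.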
   3356 
   3357   __ bind(&result_not_equal);
   3358   // Conditionally update the result based on either length_delta or the
   3359   // last comparison performed in the loop above.
   3360   Label ret;
   3361   __ Branch(&ret, eq, scratch2, Operand(scratch4));
   3362   __ li(v0, Operand(Smi::FromInt(GREATER)));
   3363   __ Branch(&ret, gt, scratch2, Operand(scratch4));
   3364   __ li(v0, Operand(Smi::FromInt(LESS)));
   3365   __ bind(&ret);
   3366   __ Ret();
   3367 }
   3368 
   3369 
   3370 void StringHelper::GenerateOneByteCharsCompareLoop(
   3371     MacroAssembler* masm, Register left, Register right, Register length,
   3372     Register scratch1, Register scratch2, Register scratch3,
   3373     Label* chars_not_equal) {
   3374   // Change index to run from -length to -1 by adding length to string
   3375   // start. This means that loop ends when index reaches zero, which
   3376   // doesn't need an additional compare.
   3377   __ SmiUntag(length);
   3378   __ Daddu(scratch1, length,
   3379           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   3380   __ Daddu(left, left, Operand(scratch1));
   3381   __ Daddu(right, right, Operand(scratch1));
   3382   __ Dsubu(length, zero_reg, length);
   3383   Register index = length;  // index = -length;
   3384 
   3386   // Compare loop.
   3387   Label loop;
   3388   __ bind(&loop);
   3389   __ Daddu(scratch3, left, index);
   3390   __ lbu(scratch1, MemOperand(scratch3));
   3391   __ Daddu(scratch3, right, index);
   3392   __ lbu(scratch2, MemOperand(scratch3));
   3393   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
   3394   __ Daddu(index, index, 1);
   3395   __ Branch(&loop, ne, index, Operand(zero_reg));
   3396 }
   3397 
   3398 
   3399 void StringCompareStub::Generate(MacroAssembler* masm) {
   3400   // ----------- S t a t e -------------
   3401   //  -- a1    : left
   3402   //  -- a0    : right
   3403   //  -- ra    : return address
   3404   // -----------------------------------
   3405   __ AssertString(a1);
   3406   __ AssertString(a0);
   3407 
   3408   Label not_same;
   3409   __ Branch(&not_same, ne, a0, Operand(a1));
   3410   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   3411   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
   3412                       a2);
   3413   __ Ret();
   3414 
   3415   __ bind(&not_same);
   3416 
   3417   // Check that both objects are sequential one-byte strings.
   3418   Label runtime;
   3419   __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
   3420 
   3421   // Compare flat one-byte strings natively.
   3422   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
   3423                       a3);
   3424   StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
   3425 
   3426   __ bind(&runtime);
   3427   __ Push(a1, a0);
   3428   __ TailCallRuntime(Runtime::kStringCompare);
   3429 }
   3430 
   3431 
   3432 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   3433   // ----------- S t a t e -------------
   3434   //  -- a1    : left
   3435   //  -- a0    : right
   3436   //  -- ra    : return address
   3437   // -----------------------------------
   3438 
   3439   // Load a2 with the allocation site. We stick an undefined dummy value here
   3440   // and replace it with the real allocation site later when we instantiate this
   3441   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   3442   __ li(a2, handle(isolate()->heap()->undefined_value()));
   3443 
   3444   // Make sure that we actually patched the allocation site.
   3445   if (FLAG_debug_code) {
   3446     __ And(at, a2, Operand(kSmiTagMask));
   3447     __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
   3448     __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
   3449     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   3450     __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
   3451   }
   3452 
   3453   // Tail call into the stub that handles binary operations with allocation
   3454   // sites.
   3455   BinaryOpWithAllocationSiteStub stub(isolate(), state());
   3456   __ TailCallStub(&stub);
   3457 }
   3458 
   3459 
   3460 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   3461   DCHECK_EQ(CompareICState::BOOLEAN, state());
   3462   Label miss;
   3463 
   3464   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   3465   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   3466   if (op() != Token::EQ_STRICT && is_strong(strength())) {
   3467     __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   3468   } else {
   3469     if (!Token::IsEqualityOp(op())) {
   3470       __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
   3471       __ AssertSmi(a1);
   3472       __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
   3473       __ AssertSmi(a0);
   3474     }
   3475     __ Ret(USE_DELAY_SLOT);
   3476     __ Dsubu(v0, a1, a0);
   3477   }
   3478 
   3479   __ bind(&miss);
   3480   GenerateMiss(masm);
   3481 }
   3482 
   3483 
   3484 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   3485   DCHECK(state() == CompareICState::SMI);
   3486   Label miss;
   3487   __ Or(a2, a1, a0);
   3488   __ JumpIfNotSmi(a2, &miss);
   3489 
   3490   if (GetCondition() == eq) {
   3491     // For equality we do not care about the sign of the result.
   3492     __ Ret(USE_DELAY_SLOT);
   3493     __ Dsubu(v0, a0, a1);
   3494   } else {
   3495     // Untag before subtracting to avoid handling overflow.
   3496     __ SmiUntag(a1);
   3497     __ SmiUntag(a0);
   3498     __ Ret(USE_DELAY_SLOT);
   3499     __ Dsubu(v0, a1, a0);
   3500   }
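          // Untagging first matters for the ordered case: the difference of
          // two untagged 32-bit smi payloads always fits in a 64-bit
          // register, while subtracting the tagged values could overflow and
          // flip the sign of the result.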
   3501 
   3502   __ bind(&miss);
   3503   GenerateMiss(masm);
   3504 }
   3505 
   3506 
   3507 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   3508   DCHECK(state() == CompareICState::NUMBER);
   3509 
   3510   Label generic_stub;
   3511   Label unordered, maybe_undefined1, maybe_undefined2;
   3512   Label miss;
   3513 
   3514   if (left() == CompareICState::SMI) {
   3515     __ JumpIfNotSmi(a1, &miss);
   3516   }
   3517   if (right() == CompareICState::SMI) {
   3518     __ JumpIfNotSmi(a0, &miss);
   3519   }
   3520 
   3521   // Inlining the double comparison and falling back to the general compare
   3522   // stub if NaN is involved.
   3523   // Load left and right operand.
   3524   Label done, left, left_smi, right_smi;
   3525   __ JumpIfSmi(a0, &right_smi);
   3526   __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
   3527               DONT_DO_SMI_CHECK);
   3528   __ Dsubu(a2, a0, Operand(kHeapObjectTag));
   3529   __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
   3530   __ Branch(&left);
   3531   __ bind(&right_smi);
   3532   __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
   3533   FPURegister single_scratch = f6;
   3534   __ mtc1(a2, single_scratch);
   3535   __ cvt_d_w(f2, single_scratch);
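          // mtc1 moves the untagged 32-bit integer into an FPU register and
          // cvt_d_w widens it to a double, so both operands end up as doubles
          // in f0/f2 regardless of their original representation.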
   3536 
   3537   __ bind(&left);
   3538   __ JumpIfSmi(a1, &left_smi);
   3539   __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
   3540               DONT_DO_SMI_CHECK);
   3541   __ Dsubu(a2, a1, Operand(kHeapObjectTag));
   3542   __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
   3543   __ Branch(&done);
   3544   __ bind(&left_smi);
   3545   __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
   3546   single_scratch = f8;
   3547   __ mtc1(a2, single_scratch);
   3548   __ cvt_d_w(f0, single_scratch);
   3549 
   3550   __ bind(&done);
   3551 
   3552   // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
   3553   Label fpu_eq, fpu_lt;
   3554   // Test if equal, and also handle the unordered/NaN case.
   3555   __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
   3556 
   3557   // Test if less (unordered case is already handled).
   3558   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
   3559 
   3560   // Otherwise it's greater, so just fall through and return.
   3561   DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
   3562   __ Ret(USE_DELAY_SLOT);
   3563   __ li(v0, Operand(GREATER));
   3564 
   3565   __ bind(&fpu_eq);
   3566   __ Ret(USE_DELAY_SLOT);
   3567   __ li(v0, Operand(EQUAL));
   3568 
   3569   __ bind(&fpu_lt);
   3570   __ Ret(USE_DELAY_SLOT);
   3571   __ li(v0, Operand(LESS));
   3572 
   3573   __ bind(&unordered);
   3574   __ bind(&generic_stub);
   3575   CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
   3576                      CompareICState::GENERIC, CompareICState::GENERIC);
   3577   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   3578 
   3579   __ bind(&maybe_undefined1);
   3580   if (Token::IsOrderedRelationalCompareOp(op())) {
   3581     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   3582     __ Branch(&miss, ne, a0, Operand(at));
   3583     __ JumpIfSmi(a1, &unordered);
   3584     __ GetObjectType(a1, a2, a2);
   3585     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
   3586     __ jmp(&unordered);
   3587   }
   3588 
   3589   __ bind(&maybe_undefined2);
   3590   if (Token::IsOrderedRelationalCompareOp(op())) {
   3591     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   3592     __ Branch(&unordered, eq, a1, Operand(at));
   3593   }
   3594 
   3595   __ bind(&miss);
   3596   GenerateMiss(masm);
   3597 }
   3598 
   3599 
   3600 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   3601   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   3602   Label miss;
   3603 
   3604   // Registers containing left and right operands respectively.
   3605   Register left = a1;
   3606   Register right = a0;
   3607   Register tmp1 = a2;
   3608   Register tmp2 = a3;
   3609 
   3610   // Check that both operands are heap objects.
   3611   __ JumpIfEitherSmi(left, right, &miss);
   3612 
   3613   // Check that both operands are internalized strings.
   3614   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   3615   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   3616   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   3617   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   3618   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   3619   __ Or(tmp1, tmp1, Operand(tmp2));
   3620   __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   3621   __ Branch(&miss, ne, at, Operand(zero_reg));
   3622 
   3623   // Make sure a0 is non-zero. At this point input operands are
   3624   // guaranteed to be non-zero.
   3625   DCHECK(right.is(a0));
   3626   STATIC_ASSERT(EQUAL == 0);
   3627   STATIC_ASSERT(kSmiTag == 0);
   3628   __ mov(v0, right);
   3629   // Internalized strings are compared by identity.
   3630   __ Ret(ne, left, Operand(right));
   3631   DCHECK(is_int16(EQUAL));
   3632   __ Ret(USE_DELAY_SLOT);
   3633   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   3634 
   3635   __ bind(&miss);
   3636   GenerateMiss(masm);
   3637 }
   3638 
   3639 
   3640 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   3641   DCHECK(state() == CompareICState::UNIQUE_NAME);
   3642   DCHECK(GetCondition() == eq);
   3643   Label miss;
   3644 
   3645   // Registers containing left and right operands respectively.
   3646   Register left = a1;
   3647   Register right = a0;
   3648   Register tmp1 = a2;
   3649   Register tmp2 = a3;
   3650 
   3651   // Check that both operands are heap objects.
   3652   __ JumpIfEitherSmi(left, right, &miss);
   3653 
   3654   // Check that both operands are unique names. This leaves the instance
   3655   // types loaded in tmp1 and tmp2.
   3656   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   3657   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   3658   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   3659   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   3660 
   3661   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
   3662   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
   3663 
   3664   // Use a0 as result
   3665   __ mov(v0, a0);
   3666 
   3667   // Unique names are compared by identity.
   3668   Label done;
   3669   __ Branch(&done, ne, left, Operand(right));
   3670   // Make sure a0 is non-zero. At this point input operands are
   3671   // guaranteed to be non-zero.
   3672   DCHECK(right.is(a0));
   3673   STATIC_ASSERT(EQUAL == 0);
   3674   STATIC_ASSERT(kSmiTag == 0);
   3675   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   3676   __ bind(&done);
   3677   __ Ret();
   3678 
   3679   __ bind(&miss);
   3680   GenerateMiss(masm);
   3681 }
   3682 
   3683 
   3684 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
   3685   DCHECK(state() == CompareICState::STRING);
   3686   Label miss;
   3687 
   3688   bool equality = Token::IsEqualityOp(op());
   3689 
   3690   // Registers containing left and right operands respectively.
   3691   Register left = a1;
   3692   Register right = a0;
   3693   Register tmp1 = a2;
   3694   Register tmp2 = a3;
   3695   Register tmp3 = a4;
   3696   Register tmp4 = a5;
   3697   Register tmp5 = a6;
   3698 
   3699   // Check that both operands are heap objects.
   3700   __ JumpIfEitherSmi(left, right, &miss);
   3701 
   3702   // Check that both operands are strings. This leaves the instance
   3703   // types loaded in tmp1 and tmp2.
   3704   __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   3705   __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   3706   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   3707   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   3708   STATIC_ASSERT(kNotStringTag != 0);
   3709   __ Or(tmp3, tmp1, tmp2);
   3710   __ And(tmp5, tmp3, Operand(kIsNotStringMask));
   3711   __ Branch(&miss, ne, tmp5, Operand(zero_reg));
   3712 
   3713   // Fast check for identical strings.
   3714   Label left_ne_right;
   3715   STATIC_ASSERT(EQUAL == 0);
   3716   STATIC_ASSERT(kSmiTag == 0);
   3717   __ Branch(&left_ne_right, ne, left, Operand(right));
   3718   __ Ret(USE_DELAY_SLOT);
   3719   __ mov(v0, zero_reg);  // In the delay slot.
   3720   __ bind(&left_ne_right);
   3721 
   3722   // Handle not identical strings.
   3723 
   3724   // Check that both strings are internalized strings. If they are, we're done
   3725   // because we already know they are not identical. We know they are both
   3726   // strings.
   3727   if (equality) {
   3728     DCHECK(GetCondition() == eq);
   3729     STATIC_ASSERT(kInternalizedTag == 0);
   3730     __ Or(tmp3, tmp1, Operand(tmp2));
   3731     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
   3732     Label is_symbol;
   3733     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
   3734     // Make sure a0 is non-zero. At this point input operands are
   3735     // guaranteed to be non-zero.
   3736     DCHECK(right.is(a0));
   3737     __ Ret(USE_DELAY_SLOT);
   3738     __ mov(v0, a0);  // In the delay slot.
   3739     __ bind(&is_symbol);
   3740   }
   3741 
   3742   // Check that both strings are sequential one_byte.
   3743   Label runtime;
   3744   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
   3745                                                     &runtime);
   3746 
   3747   // Compare flat one_byte strings. Returns when done.
   3748   if (equality) {
   3749     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
   3750                                                   tmp3);
   3751   } else {
   3752     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
   3753                                                     tmp2, tmp3, tmp4);
   3754   }
   3755 
   3756   // Handle more complex cases in runtime.
   3757   __ bind(&runtime);
   3758   __ Push(left, right);
   3759   if (equality) {
   3760     __ TailCallRuntime(Runtime::kStringEquals);
   3761   } else {
   3762     __ TailCallRuntime(Runtime::kStringCompare);
   3763   }
   3764 
   3765   __ bind(&miss);
   3766   GenerateMiss(masm);
   3767 }
   3768 
   3769 
   3770 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
   3771   DCHECK_EQ(CompareICState::RECEIVER, state());
   3772   Label miss;
   3773   __ And(a2, a1, Operand(a0));
   3774   __ JumpIfSmi(a2, &miss);
   3775 
   3776   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   3777   __ GetObjectType(a0, a2, a2);
   3778   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
   3779   __ GetObjectType(a1, a2, a2);
   3780   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
   3781 
   3782   DCHECK_EQ(eq, GetCondition());
   3783   __ Ret(USE_DELAY_SLOT);
   3784   __ dsubu(v0, a0, a1);
   3785 
   3786   __ bind(&miss);
   3787   GenerateMiss(masm);
   3788 }
   3789 
   3790 
   3791 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   3792   Label miss;
   3793   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   3794   __ And(a2, a1, a0);
   3795   __ JumpIfSmi(a2, &miss);
   3796   __ GetWeakValue(a4, cell);
   3797   __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
   3798   __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
   3799   __ Branch(&miss, ne, a2, Operand(a4));
   3800   __ Branch(&miss, ne, a3, Operand(a4));
   3801 
   3802   if (Token::IsEqualityOp(op())) {
   3803     __ Ret(USE_DELAY_SLOT);
   3804     __ dsubu(v0, a0, a1);
   3805   } else if (is_strong(strength())) {
   3806     __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   3807   } else {
   3808     if (op() == Token::LT || op() == Token::LTE) {
   3809       __ li(a2, Operand(Smi::FromInt(GREATER)));
   3810     } else {
   3811       __ li(a2, Operand(Smi::FromInt(LESS)));
   3812     }
   3813     __ Push(a1, a0, a2);
   3814     __ TailCallRuntime(Runtime::kCompare);
   3815   }
   3816 
   3817   __ bind(&miss);
   3818   GenerateMiss(masm);
   3819 }
   3820 
   3821 
   3822 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   3823   {
   3824     // Call the runtime system in a fresh internal frame.
   3825     FrameScope scope(masm, StackFrame::INTERNAL);
   3826     __ Push(a1, a0);
   3827     __ Push(ra, a1, a0);
   3828     __ li(a4, Operand(Smi::FromInt(op())));
   3829     __ daddiu(sp, sp, -kPointerSize);
   3830     __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
   3831                    USE_DELAY_SLOT);
   3832     __ sd(a4, MemOperand(sp));  // In the delay slot.
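            // Stack layout at the call: the op() smi plus the second (a1, a0)
            // pair form the three arguments to kCompareIC_Miss; the first
            // pair and ra survive underneath for the restore below.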
   3833     // Compute the entry point of the rewritten stub.
   3834     __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   3835     // Restore registers.
   3836     __ Pop(a1, a0, ra);
   3837   }
   3838   __ Jump(a2);
   3839 }
   3840 
   3841 
   3842 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   3843   // Make room for arguments to fit the C calling convention. Most callers
   3844   // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
   3845   // so they handle restoring the stack and we don't have to do that here.
   3846   // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
   3847   // kCArgsSlotsSize stack space after the call.
   3848   __ daddiu(sp, sp, -kCArgsSlotsSize);
   3849   // Place the return address on the stack, making the call
   3850   // GC safe. The RegExp backend also relies on this.
   3851   __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
   3852   __ Call(t9);  // Call the C++ function.
   3853   __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
   3854 
   3855   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
   3856     // In case of an error the return address may point to a memory area
   3857     // filled with kZapValue by the GC.
   3858     // Dereference the address and check for this.
   3859     __ Uld(a4, MemOperand(t9));
   3860     __ Assert(ne, kReceivedInvalidReturnAddress, a4,
   3861         Operand(reinterpret_cast<uint64_t>(kZapValue)));
   3862   }
   3863   __ Jump(t9);
   3864 }
   3865 
   3866 
   3867 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   3868                                     Register target) {
   3869   intptr_t loc =
   3870       reinterpret_cast<intptr_t>(GetCode().location());
   3871   __ Move(t9, target);
   3872   __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
   3873   __ Call(at);
   3874 }
   3875 
   3876 
   3877 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
   3878                                                       Label* miss,
   3879                                                       Label* done,
   3880                                                       Register receiver,
   3881                                                       Register properties,
   3882                                                       Handle<Name> name,
   3883                                                       Register scratch0) {
   3884   DCHECK(name->IsUniqueName());
   3885   // If the names of the slots probed for the hash value (probes 1 to
   3886   // kProbes - 1) all differ from the given name, and the kProbes-th slot is
   3887   // unused (its name is the undefined value), then the hash table is
   3888   // guaranteed not to contain the property. This holds even if some slots
   3889   // contain deleted properties (their names are the hole value).
   3890   for (int i = 0; i < kInlinedProbes; i++) {
   3891     // scratch0 points to properties hash.
   3892     // Compute the masked index: (hash + i + i * i) & mask.
   3893     Register index = scratch0;
   3894     // Capacity is smi 2^n.
   3895     __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
   3896     __ Dsubu(index, index, Operand(1));
   3897     __ And(index, index,
   3898            Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
   3899 
   3900     // Scale the index by multiplying by the entry size.
   3901     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3902     __ dsll(at, index, 1);
   3903     __ Daddu(index, index, at);  // index *= 3.
   3904 
   3905     Register entity_name = scratch0;
   3906     // Having undefined at this place means the name is not contained.
   3907     STATIC_ASSERT(kSmiTagSize == 1);
   3908     Register tmp = properties;
   3909 
   3910     __ dsll(scratch0, index, kPointerSizeLog2);
   3911     __ Daddu(tmp, properties, scratch0);
   3912     __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   3913 
   3914     DCHECK(!tmp.is(entity_name));
   3915     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
   3916     __ Branch(done, eq, entity_name, Operand(tmp));
   3917 
   3918     // Load the hole ready for use below:
   3919     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
   3920 
   3921     // Stop if found the property.
   3922     __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
   3923 
   3924     Label good;
   3925     __ Branch(&good, eq, entity_name, Operand(tmp));
   3926 
   3927     // Check if the entry name is not a unique name.
   3928     __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   3929     __ lbu(entity_name,
   3930            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   3931     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
   3932     __ bind(&good);
   3933 
   3934     // Restore the properties.
   3935     __ ld(properties,
   3936           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   3937   }
   3938 
   3939   const int spill_mask =
   3940       (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
   3941        a2.bit() | a1.bit() | a0.bit() | v0.bit());
   3942 
   3943   __ MultiPush(spill_mask);
   3944   __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   3945   __ li(a1, Operand(Handle<Name>(name)));
   3946   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   3947   __ CallStub(&stub);
   3948   __ mov(at, v0);
   3949   __ MultiPop(spill_mask);
   3950 
   3951   __ Branch(done, eq, at, Operand(zero_reg));
   3952   __ Branch(miss, ne, at, Operand(zero_reg));
   3953 }
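
        // Illustrative sketch (never emitted or called) of the index arithmetic the
        // inlined probes above perform, assuming a power-of-two capacity:
        // probe_offset stands for NameDictionary::GetProbeOffset(i), and the
        // factor 3 is NameDictionary::kEntrySize, computed above as dsll 1 + daddu.
        static inline uint32_t ExampleNegativeLookupSlot(uint32_t hash,
                                                         uint32_t capacity,
                                                         uint32_t probe_offset) {
          uint32_t index = (hash + probe_offset) & (capacity - 1);
          return index * 3;  // First element of the probed dictionary entry.
        }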
   3954 
   3955 
   3956 // Probe the name dictionary in the |elements| register. Jump to the
   3957 // |done| label if a property with the given name is found. Jump to
   3958 // the |miss| label otherwise.
   3959 // If the lookup succeeds, |scratch2| will equal elements + kPointerSize * index.
   3960 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
   3961                                                       Label* miss,
   3962                                                       Label* done,
   3963                                                       Register elements,
   3964                                                       Register name,
   3965                                                       Register scratch1,
   3966                                                       Register scratch2) {
   3967   DCHECK(!elements.is(scratch1));
   3968   DCHECK(!elements.is(scratch2));
   3969   DCHECK(!name.is(scratch1));
   3970   DCHECK(!name.is(scratch2));
   3971 
   3972   __ AssertName(name);
   3973 
   3974   // Compute the capacity mask.
   3975   __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
   3976   __ SmiUntag(scratch1);
   3977   __ Dsubu(scratch1, scratch1, Operand(1));
   3978 
   3979   // Generate an unrolled loop that performs a few probes before
   3980   // giving up. Measurements done on Gmail indicate that 2 probes
   3981   // cover ~93% of loads from dictionaries.
   3982   for (int i = 0; i < kInlinedProbes; i++) {
   3983     // Compute the masked index: (hash + i + i * i) & mask.
   3984     __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
   3985     if (i > 0) {
   3986       // Add the probe offset (i + i * i) left shifted to avoid right shifting
   3987       // the hash in a separate instruction. The value hash + i + i * i is right
   3988       // shifted in the following and instruction.
   3989       DCHECK(NameDictionary::GetProbeOffset(i) <
   3990              1 << (32 - Name::kHashFieldOffset));
   3991       __ Daddu(scratch2, scratch2, Operand(
   3992           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   3993     }
   3994     __ dsrl(scratch2, scratch2, Name::kHashShift);
   3995     __ And(scratch2, scratch1, scratch2);
   3996 
   3997     // Scale the index by multiplying by the entry size.
   3998     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3999     // scratch2 = scratch2 * 3.
   4000 
   4001     __ dsll(at, scratch2, 1);
   4002     __ Daddu(scratch2, scratch2, at);
   4003 
   4004     // Check if the key is identical to the name.
   4005     __ dsll(at, scratch2, kPointerSizeLog2);
   4006     __ Daddu(scratch2, elements, at);
   4007     __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
   4008     __ Branch(done, eq, name, Operand(at));
   4009   }
   4010 
   4011   const int spill_mask =
   4012       (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
   4013        a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
   4014       ~(scratch1.bit() | scratch2.bit());
   4015 
   4016   __ MultiPush(spill_mask);
   4017   if (name.is(a0)) {
   4018     DCHECK(!elements.is(a1));
   4019     __ Move(a1, name);
   4020     __ Move(a0, elements);
   4021   } else {
   4022     __ Move(a0, elements);
   4023     __ Move(a1, name);
   4024   }
   4025   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
   4026   __ CallStub(&stub);
   4027   __ mov(scratch2, a2);
   4028   __ mov(at, v0);
   4029   __ MultiPop(spill_mask);
   4030 
   4031   __ Branch(done, ne, at, Operand(zero_reg));
   4032   __ Branch(miss, eq, at, Operand(zero_reg));
   4033 }
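
        // Sketch of the shift trick used in the probe loop above (illustrative
        // only, never called): adding the probe offset pre-shifted by
        // Name::kHashShift and shifting right once afterwards yields the same
        // index as shifting first and then adding the offset, saving one
        // instruction per probe; the DCHECK above bounds the offset so the
        // addition cannot overflow.
        static inline uint32_t ExampleShiftedProbe(uint32_t hash_field,
                                                   uint32_t probe_offset,
                                                   uint32_t hash_shift) {
          // Equals ((hash_field >> hash_shift) + probe_offset).
          return (hash_field + (probe_offset << hash_shift)) >> hash_shift;
        }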
   4034 
   4035 
   4036 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   4037   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   4038   // we cannot call anything that could cause a GC from this stub.
   4039   // Registers:
   4040   //  result: holds the result of the lookup.
   4041   //  a1: key
   4042   //  dictionary: NameDictionary to probe.
   4043   //  index: will hold the index of the entry if the lookup succeeds;
   4044   //         might alias with result_.
   4045   // Returns:
   4046   //  result_ is zero if the lookup failed, non-zero otherwise.
   4047 
   4048   Register result = v0;
   4049   Register dictionary = a0;
   4050   Register key = a1;
   4051   Register index = a2;
   4052   Register mask = a3;
   4053   Register hash = a4;
   4054   Register undefined = a5;
   4055   Register entry_key = a6;
   4056 
   4057   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   4058 
   4059   __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
   4060   __ SmiUntag(mask);
   4061   __ Dsubu(mask, mask, Operand(1));
   4062 
   4063   __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
   4064 
   4065   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   4066 
   4067   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   4068     // Compute the masked index: (hash + i + i * i) & mask.
   4069     // Capacity is smi 2^n.
   4070     if (i > 0) {
   4071       // Add the probe offset (i + i * i) left shifted to avoid right shifting
   4072       // the hash in a separate instruction. The value hash + i + i * i is right
   4073       // shifted in the following and instruction.
   4074       DCHECK(NameDictionary::GetProbeOffset(i) <
   4075              1 << (32 - Name::kHashFieldOffset));
   4076       __ Daddu(index, hash, Operand(
   4077           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   4078     } else {
   4079       __ mov(index, hash);
   4080     }
   4081     __ dsrl(index, index, Name::kHashShift);
   4082     __ And(index, mask, index);
   4083 
   4084     // Scale the index by multiplying by the entry size.
   4085     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   4086     // index *= 3.
   4087     __ mov(at, index);
   4088     __ dsll(index, index, 1);
   4089     __ Daddu(index, index, at);
   4090 
   4091 
   4092     STATIC_ASSERT(kSmiTagSize == 1);
   4093     __ dsll(index, index, kPointerSizeLog2);
   4094     __ Daddu(index, index, dictionary);
   4095     __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
   4096 
   4097     // Having undefined at this place means the name is not contained.
   4098     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
   4099 
   4100     // Stop if found the property.
   4101     __ Branch(&in_dictionary, eq, entry_key, Operand(key));
   4102 
   4103     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
   4104       // Check if the entry name is not a unique name.
   4105       __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   4106       __ lbu(entry_key,
   4107              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   4108       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
   4109     }
   4110   }
   4111 
   4112   __ bind(&maybe_in_dictionary);
   4113   // If we are doing a negative lookup, probing failure should be treated
   4114   // as a lookup success. For a positive lookup, probing failure should be
   4115   // treated as a lookup failure.
   4116   if (mode() == POSITIVE_LOOKUP) {
   4117     __ Ret(USE_DELAY_SLOT);
   4118     __ mov(result, zero_reg);
   4119   }
   4120 
   4121   __ bind(&in_dictionary);
   4122   __ Ret(USE_DELAY_SLOT);
   4123   __ li(result, 1);
   4124 
   4125   __ bind(&not_in_dictionary);
   4126   __ Ret(USE_DELAY_SLOT);
   4127   __ mov(result, zero_reg);
   4128 }
   4129 
   4130 
   4131 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
   4132     Isolate* isolate) {
   4133   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
   4134   stub1.GetCode();
   4135   // Hydrogen code stubs need stub2 at snapshot time.
   4136   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
   4137   stub2.GetCode();
   4138 }
   4139 
   4140 
   4141 // Takes the input in 3 registers: address_, value_ and object_.  A pointer to
   4142 // the value has just been written into the object, now this stub makes sure
   4143 // we keep the GC informed.  The word in the object where the value has been
   4144 // written is in the address register.
   4145 void RecordWriteStub::Generate(MacroAssembler* masm) {
   4146   Label skip_to_incremental_noncompacting;
   4147   Label skip_to_incremental_compacting;
   4148 
   4149   // The first two branch+nop instructions are generated with labels so as to
   4150   // get the offset fixed up correctly by the bind(Label*) call.  We patch it
   4151   // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
   4152   // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
   4153   // incremental heap marking.
   4154   // See RecordWriteStub::Patch for details.
   4155   __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
   4156   __ nop();
   4157   __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
   4158   __ nop();
   4159 
   4160   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   4161     __ RememberedSetHelper(object(),
   4162                            address(),
   4163                            value(),
   4164                            save_fp_regs_mode(),
   4165                            MacroAssembler::kReturnAtEnd);
   4166   }
   4167   __ Ret();
   4168 
   4169   __ bind(&skip_to_incremental_noncompacting);
   4170   GenerateIncremental(masm, INCREMENTAL);
   4171 
   4172   __ bind(&skip_to_incremental_compacting);
   4173   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
   4174 
   4175   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
   4176   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
   4177 
   4178   PatchBranchIntoNop(masm, 0);
   4179   PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
   4180 }
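
        // Sketch of the patching idea with hypothetical word manipulation (see
        // RecordWriteStub::Patch for the real implementation): each of the two
        // patchable sites above is toggled between "bne zero_reg, zero_reg"
        // (never taken, an effective nop) and "beq zero_reg, zero_reg" (always
        // taken) as incremental marking starts and stops.
        static inline uint32_t ExampleToggleBranch(uint32_t instr,
                                                   bool marking_active) {
          const uint32_t kBeqOpcode = 4u << 26;        // MIPS beq major opcode.
          const uint32_t kBneOpcode = 5u << 26;        // MIPS bne major opcode.
          uint32_t operands = instr & ~(0x3Fu << 26);  // Keep registers + offset.
          return operands | (marking_active ? kBeqOpcode : kBneOpcode);
        }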
   4181 
   4182 
   4183 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   4184   regs_.Save(masm);
   4185 
   4186   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   4187     Label dont_need_remembered_set;
   4188 
   4189     __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
   4190     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
   4191                            regs_.scratch0(),
   4192                            &dont_need_remembered_set);
   4193 
   4194     __ CheckPageFlag(regs_.object(),
   4195                      regs_.scratch0(),
   4196                      1 << MemoryChunk::SCAN_ON_SCAVENGE,
   4197                      ne,
   4198                      &dont_need_remembered_set);
   4199 
   4200     // First notify the incremental marker if necessary, then update the
   4201     // remembered set.
   4202     CheckNeedsToInformIncrementalMarker(
   4203         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
   4204     InformIncrementalMarker(masm);
   4205     regs_.Restore(masm);
   4206     __ RememberedSetHelper(object(),
   4207                            address(),
   4208                            value(),
   4209                            save_fp_regs_mode(),
   4210                            MacroAssembler::kReturnAtEnd);
   4211 
   4212     __ bind(&dont_need_remembered_set);
   4213   }
   4214 
   4215   CheckNeedsToInformIncrementalMarker(
   4216       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
   4217   InformIncrementalMarker(masm);
   4218   regs_.Restore(masm);
   4219   __ Ret();
   4220 }
   4221 
   4222 
   4223 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
   4224   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   4225   int argument_count = 3;
   4226   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   4227   Register address =
   4228       a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
   4229   DCHECK(!address.is(regs_.object()));
   4230   DCHECK(!address.is(a0));
   4231   __ Move(address, regs_.address());
   4232   __ Move(a0, regs_.object());
   4233   __ Move(a1, address);
   4234   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
   4235 
   4236   AllowExternalCallThatCantCauseGC scope(masm);
   4237   __ CallCFunction(
   4238       ExternalReference::incremental_marking_record_write_function(isolate()),
   4239       argument_count);
   4240   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
   4241 }
   4242 
   4243 
   4244 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   4245     MacroAssembler* masm,
   4246     OnNoNeedToInformIncrementalMarker on_no_need,
   4247     Mode mode) {
   4248   Label on_black;
   4249   Label need_incremental;
   4250   Label need_incremental_pop_scratch;
   4251 
   4252   __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
   4253   __ ld(regs_.scratch1(),
   4254         MemOperand(regs_.scratch0(),
   4255                    MemoryChunk::kWriteBarrierCounterOffset));
   4256   __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
   4257   __ sd(regs_.scratch1(),
   4258          MemOperand(regs_.scratch0(),
   4259                     MemoryChunk::kWriteBarrierCounterOffset));
   4260   __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
   4261 
   4262   // Let's look at the color of the object:  If it is not black we don't have
   4263   // to inform the incremental marker.
   4264   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
   4265 
   4266   regs_.Restore(masm);
   4267   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   4268     __ RememberedSetHelper(object(),
   4269                            address(),
   4270                            value(),
   4271                            save_fp_regs_mode(),
   4272                            MacroAssembler::kReturnAtEnd);
   4273   } else {
   4274     __ Ret();
   4275   }
   4276 
   4277   __ bind(&on_black);
   4278 
   4279   // Get the value from the slot.
   4280   __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
   4281 
   4282   if (mode == INCREMENTAL_COMPACTION) {
   4283     Label ensure_not_white;
   4284 
   4285     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
   4286                      regs_.scratch1(),  // Scratch.
   4287                      MemoryChunk::kEvacuationCandidateMask,
   4288                      eq,
   4289                      &ensure_not_white);
   4290 
   4291     __ CheckPageFlag(regs_.object(),
   4292                      regs_.scratch1(),  // Scratch.
   4293                      MemoryChunk::kSkipEvacuationSlotsRecordingMask,
   4294                      eq,
   4295                      &need_incremental);
   4296 
   4297     __ bind(&ensure_not_white);
   4298   }
   4299 
   4300   // We need extra registers for this, so we push the object and the address
   4301   // register temporarily.
   4302   __ Push(regs_.object(), regs_.address());
   4303   __ JumpIfWhite(regs_.scratch0(),  // The value.
   4304                  regs_.scratch1(),  // Scratch.
   4305                  regs_.object(),    // Scratch.
   4306                  regs_.address(),   // Scratch.
   4307                  &need_incremental_pop_scratch);
   4308   __ Pop(regs_.object(), regs_.address());
   4309 
   4310   regs_.Restore(masm);
   4311   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   4312     __ RememberedSetHelper(object(),
   4313                            address(),
   4314                            value(),
   4315                            save_fp_regs_mode(),
   4316                            MacroAssembler::kReturnAtEnd);
   4317   } else {
   4318     __ Ret();
   4319   }
   4320 
   4321   __ bind(&need_incremental_pop_scratch);
   4322   __ Pop(regs_.object(), regs_.address());
   4323 
   4324   __ bind(&need_incremental);
   4325 
   4326   // Fall through when we need to inform the incremental marker.
   4327 }
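
        // Sketch only of the per-page filtering at the top of the function above:
        // every emitted write barrier decrements the page's write barrier counter,
        // and once it drops below zero the stub stops filtering and informs the
        // incremental marker unconditionally.
        static inline bool ExampleCounterSaysInformMarker(intptr_t* counter) {
          *counter -= 1;
          return *counter < 0;  // Matches the "lt ... zero_reg" branch above.
        }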
   4328 
   4329 
   4330 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   4331   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   4332   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   4333   int parameter_count_offset =
   4334       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   4335   __ ld(a1, MemOperand(fp, parameter_count_offset));
   4336   if (function_mode() == JS_FUNCTION_STUB_MODE) {
   4337     __ Daddu(a1, a1, Operand(1));
   4338   }
   4339   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   4340   __ dsll(a1, a1, kPointerSizeLog2);
   4341   __ Ret(USE_DELAY_SLOT);
   4342   __ Daddu(sp, sp, a1);
   4343 }
   4344 
   4345 
   4346 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   4347   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   4348   LoadICStub stub(isolate(), state());
   4349   stub.GenerateForTrampoline(masm);
   4350 }
   4351 
   4352 
   4353 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   4354   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   4355   KeyedLoadICStub stub(isolate(), state());
   4356   stub.GenerateForTrampoline(masm);
   4357 }
   4358 
   4359 
   4360 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   4361   __ EmitLoadTypeFeedbackVector(a2);
   4362   CallICStub stub(isolate(), state());
   4363   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   4364 }
   4365 
   4366 
   4367 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
   4368 
   4369 
   4370 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4371   GenerateImpl(masm, true);
   4372 }
   4373 
   4374 
   4375 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
   4376                              Register receiver_map, Register scratch1,
   4377                              Register scratch2, bool is_polymorphic,
   4378                              Label* miss) {
   4379   // feedback initially contains the feedback array
   4380   Label next_loop, prepare_next;
   4381   Label start_polymorphic;
   4382 
   4383   Register cached_map = scratch1;
   4384 
   4385   __ ld(cached_map,
   4386         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
   4387   __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   4388   __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
   4389   // found, now call handler.
   4390   Register handler = feedback;
   4391   __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
   4392   __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   4393   __ Jump(t9);
   4394 
   4395   Register length = scratch2;
   4396   __ bind(&start_polymorphic);
   4397   __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   4398   if (!is_polymorphic) {
   4399     // If the IC could be monomorphic we have to make sure we don't go past the
   4400     // end of the feedback array.
   4401     __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
   4402   }
   4403 
   4404   Register too_far = length;
   4405   Register pointer_reg = feedback;
   4406 
   4407   // +-----+------+------+-----+-----+ ... ----+
   4408   // | map | len  | wm0  | h0  | wm1 |      hN |
   4409   // +-----+------+------+-----+-----+ ... ----+
   4410   //                 0      1     2        len-1
   4411   //                              ^              ^
   4412   //                              |              |
   4413   //                         pointer_reg      too_far
   4414   //                         aka feedback     scratch2
   4415   // also need receiver_map
   4416   // use cached_map (scratch1) to look in the weak map values.
   4417   __ SmiScale(too_far, length, kPointerSizeLog2);
   4418   __ Daddu(too_far, feedback, Operand(too_far));
   4419   __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   4420   __ Daddu(pointer_reg, feedback,
   4421            Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
   4422 
   4423   __ bind(&next_loop);
   4424   __ ld(cached_map, MemOperand(pointer_reg));
   4425   __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   4426   __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
   4427   __ ld(handler, MemOperand(pointer_reg, kPointerSize));
   4428   __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   4429   __ Jump(t9);
   4430 
   4431   __ bind(&prepare_next);
   4432   __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
   4433   __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
   4434 
   4435   // We exhausted our array of map handler pairs.
   4436   __ Branch(miss);
   4437 }
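
        // Plain-C++ sketch of the scan above (hypothetical types; the real loop
        // walks raw pointers through the FixedArray): elements 0 and 1 hold the
        // monomorphic (weak map, handler) pair, and further pairs follow from
        // element 2 onwards until the array length is exhausted.
        struct ExampleFeedbackPair { void* weak_map; void* handler; };
        static inline void* ExampleFindHandler(const ExampleFeedbackPair* pairs,
                                               int count, void* receiver_map) {
          for (int i = 0; i < count; i++) {
            if (pairs[i].weak_map == receiver_map) return pairs[i].handler;
          }
          return NULL;  // Corresponds to branching to |miss|.
        }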
   4438 
   4439 
   4440 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
   4441                                   Register receiver_map, Register feedback,
   4442                                   Register vector, Register slot,
   4443                                   Register scratch, Label* compare_map,
   4444                                   Label* load_smi_map, Label* try_array) {
   4445   __ JumpIfSmi(receiver, load_smi_map);
   4446   __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   4447   __ bind(compare_map);
   4448   Register cached_map = scratch;
   4449   // Move the weak map into the weak_cell register.
   4450   __ ld(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
   4451   __ Branch(try_array, ne, cached_map, Operand(receiver_map));
   4452   Register handler = feedback;
   4453   __ SmiScale(handler, slot, kPointerSizeLog2);
   4454   __ Daddu(handler, vector, Operand(handler));
   4455   __ ld(handler,
   4456         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
   4457   __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
   4458   __ Jump(t9);
   4459 }
   4460 
   4461 
   4462 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4463   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
   4464   Register name = LoadWithVectorDescriptor::NameRegister();          // a2
   4465   Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
   4466   Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
   4467   Register feedback = a4;
   4468   Register receiver_map = a5;
   4469   Register scratch1 = a6;
   4470 
   4471   __ SmiScale(feedback, slot, kPointerSizeLog2);
   4472   __ Daddu(feedback, vector, Operand(feedback));
   4473   __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4474 
   4475   // Try to quickly handle the monomorphic case without knowing for sure
   4476   // if we have a weak cell in feedback. We do know it's safe to look
   4477   // at WeakCell::kValueOffset.
   4478   Label try_array, load_smi_map, compare_map;
   4479   Label not_array, miss;
   4480   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4481                         scratch1, &compare_map, &load_smi_map, &try_array);
   4482 
   4483   // Is it a fixed array?
   4484   __ bind(&try_array);
   4485   __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4486   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
   4487   __ Branch(&not_array, ne, scratch1, Operand(at));
   4488   HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
   4489 
   4490   __ bind(&not_array);
   4491   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   4492   __ Branch(&miss, ne, feedback, Operand(at));
   4493   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
   4494       Code::ComputeHandlerFlags(Code::LOAD_IC));
   4495   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
   4496                                                receiver, name, feedback,
   4497                                                receiver_map, scratch1, a7);
   4498 
   4499   __ bind(&miss);
   4500   LoadIC::GenerateMiss(masm);
   4501 
   4502   __ bind(&load_smi_map);
   4503   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   4504   __ Branch(&compare_map);
   4505 }
   4506 
   4507 
   4508 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
   4509   GenerateImpl(masm, false);
   4510 }
   4511 
   4512 
   4513 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4514   GenerateImpl(masm, true);
   4515 }
   4516 
   4517 
   4518 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4519   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
   4520   Register key = LoadWithVectorDescriptor::NameRegister();           // a2
   4521   Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
   4522   Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
   4523   Register feedback = a4;
   4524   Register receiver_map = a5;
   4525   Register scratch1 = a6;
   4526 
   4527   __ SmiScale(feedback, slot, kPointerSizeLog2);
   4528   __ Daddu(feedback, vector, Operand(feedback));
   4529   __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4530 
   4531   // Try to quickly handle the monomorphic case without knowing for sure
   4532   // if we have a weak cell in feedback. We do know it's safe to look
   4533   // at WeakCell::kValueOffset.
   4534   Label try_array, load_smi_map, compare_map;
   4535   Label not_array, miss;
   4536   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4537                         scratch1, &compare_map, &load_smi_map, &try_array);
   4538 
   4539   __ bind(&try_array);
   4540   // Is it a fixed array?
   4541   __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4542   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
   4543   __ Branch(&not_array, ne, scratch1, Operand(at));
   4544   // We have a polymorphic element handler.
   4545   __ JumpIfNotSmi(key, &miss);
   4546 
   4547   Label polymorphic, try_poly_name;
   4548   __ bind(&polymorphic);
   4549   HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
   4550 
   4551   __ bind(&not_array);
   4552   // Is it generic?
   4553   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   4554   __ Branch(&try_poly_name, ne, feedback, Operand(at));
   4555   Handle<Code> megamorphic_stub =
   4556       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   4557   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   4558 
   4559   __ bind(&try_poly_name);
   4560   // We might have a name in feedback, and a fixed array in the next slot.
   4561   __ Branch(&miss, ne, key, Operand(feedback));
   4562   // If the name comparison succeeded, we know we have a fixed array with
   4563   // at least one map/handler pair.
   4564   __ SmiScale(feedback, slot, kPointerSizeLog2);
   4565   __ Daddu(feedback, vector, Operand(feedback));
   4566   __ ld(feedback,
   4567         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   4568   HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
   4569 
   4570   __ bind(&miss);
   4571   KeyedLoadIC::GenerateMiss(masm);
   4572 
   4573   __ bind(&load_smi_map);
   4574   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   4575   __ Branch(&compare_map);
   4576 }
   4577 
   4578 
   4579 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   4580   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   4581   VectorStoreICStub stub(isolate(), state());
   4582   stub.GenerateForTrampoline(masm);
   4583 }
   4584 
   4585 
   4586 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   4587   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   4588   VectorKeyedStoreICStub stub(isolate(), state());
   4589   stub.GenerateForTrampoline(masm);
   4590 }
   4591 
   4592 
   4593 void VectorStoreICStub::Generate(MacroAssembler* masm) {
   4594   GenerateImpl(masm, false);
   4595 }
   4596 
   4597 
   4598 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4599   GenerateImpl(masm, true);
   4600 }
   4601 
   4602 
   4603 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4604   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
   4605   Register key = VectorStoreICDescriptor::NameRegister();           // a2
   4606   Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
   4607   Register slot = VectorStoreICDescriptor::SlotRegister();          // a4
   4608   DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
   4609   Register feedback = a5;
   4610   Register receiver_map = a6;
   4611   Register scratch1 = a7;
   4612 
   4613   __ SmiScale(scratch1, slot, kPointerSizeLog2);
   4614   __ Daddu(feedback, vector, Operand(scratch1));
   4615   __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4616 
   4617   // Try to quickly handle the monomorphic case without knowing for sure
   4618   // if we have a weak cell in feedback. We do know it's safe to look
   4619   // at WeakCell::kValueOffset.
   4620   Label try_array, load_smi_map, compare_map;
   4621   Label not_array, miss;
   4622   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4623                         scratch1, &compare_map, &load_smi_map, &try_array);
   4624 
   4625   // Is it a fixed array?
   4626   __ bind(&try_array);
   4627   __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4628   __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
   4629 
   4630   Register scratch2 = t0;
   4631   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
   4632                    &miss);
   4633 
   4634   __ bind(&not_array);
   4635   __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
   4636   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
   4637       Code::ComputeHandlerFlags(Code::STORE_IC));
   4638   masm->isolate()->stub_cache()->GenerateProbe(
   4639       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
   4640       scratch1, scratch2);
   4641 
   4642   __ bind(&miss);
   4643   StoreIC::GenerateMiss(masm);
   4644 
   4645   __ bind(&load_smi_map);
   4646   __ Branch(USE_DELAY_SLOT, &compare_map);
   4647   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
   4648 }
   4649 
   4650 
   4651 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
   4652   GenerateImpl(masm, false);
   4653 }
   4654 
   4655 
   4656 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4657   GenerateImpl(masm, true);
   4658 }
   4659 
   4660 
   4661 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
   4662                                        Register receiver_map, Register scratch1,
   4663                                        Register scratch2, Label* miss) {
   4664   // feedback initially contains the feedback array
   4665   Label next_loop, prepare_next;
   4666   Label start_polymorphic;
   4667   Label transition_call;
   4668 
   4669   Register cached_map = scratch1;
   4670   Register too_far = scratch2;
   4671   Register pointer_reg = feedback;
   4672 
   4673   __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   4674 
   4675   // +-----+------+------+-----+-----+-----+ ... ----+
   4676   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
   4677   // +-----+------+------+-----+-----+-----+ ... ----+
   4678   //                 0      1     2              len-1
   4679   //                 ^                                 ^
   4680   //                 |                                 |
   4681   //             pointer_reg                        too_far
   4682   //             aka feedback                       scratch2
   4683   // also need receiver_map
   4684   // use cached_map (scratch1) to look in the weak map values.
   4685   __ SmiScale(too_far, too_far, kPointerSizeLog2);
   4686   __ Daddu(too_far, feedback, Operand(too_far));
   4687   __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   4688   __ Daddu(pointer_reg, feedback,
   4689            Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
   4690 
   4691   __ bind(&next_loop);
   4692   __ ld(cached_map, MemOperand(pointer_reg));
   4693   __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   4694   __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
   4695   // Is it a transitioning store?
   4696   __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
   4697   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   4698   __ Branch(&transition_call, ne, too_far, Operand(at));
   4699 
   4700   __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
   4701   __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
   4702   __ Jump(t9);
   4703 
   4704   __ bind(&transition_call);
   4705   __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
   4706   __ JumpIfSmi(too_far, miss);
   4707 
   4708   __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
   4709   // Load the map into the correct register.
   4710   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
   4711   __ Move(feedback, too_far);
   4712   __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
   4713   __ Jump(t9);
   4714 
   4715   __ bind(&prepare_next);
   4716   __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
   4717   __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
   4718 
   4719   // We exhausted our array of map handler pairs.
   4720   __ Branch(miss);
   4721 }
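
        // Sketch of the transitioning-store scan above (hypothetical types): the
        // feedback entries are (weak map, weak transition map or undefined,
        // handler) triples, and a present transition map is passed to the handler
        // in the map register.
        struct ExampleStoreEntry {
          void* weak_map;
          void* transition;  // NULL stands in for undefined here.
          void* handler;
        };
        static inline void* ExampleFindStoreHandler(
            const ExampleStoreEntry* entries, int count, void* receiver_map,
            void** transition_out) {
          for (int i = 0; i < count; i++) {
            if (entries[i].weak_map != receiver_map) continue;
            *transition_out = entries[i].transition;
            return entries[i].handler;
          }
          return NULL;  // Corresponds to branching to |miss|.
        }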
   4722 
   4723 
   4724 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4725   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
   4726   Register key = VectorStoreICDescriptor::NameRegister();           // a2
   4727   Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
   4728   Register slot = VectorStoreICDescriptor::SlotRegister();          // a4
   4729   DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
   4730   Register feedback = a5;
   4731   Register receiver_map = a6;
   4732   Register scratch1 = a7;
   4733 
   4734   __ SmiScale(scratch1, slot, kPointerSizeLog2);
   4735   __ Daddu(feedback, vector, Operand(scratch1));
   4736   __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4737 
   4738   // Try to quickly handle the monomorphic case without knowing for sure
   4739   // if we have a weak cell in feedback. We do know it's safe to look
   4740   // at WeakCell::kValueOffset.
   4741   Label try_array, load_smi_map, compare_map;
   4742   Label not_array, miss;
   4743   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4744                         scratch1, &compare_map, &load_smi_map, &try_array);
   4745 
   4746   __ bind(&try_array);
   4747   // Is it a fixed array?
   4748   __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4749   __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
   4750 
   4751   // We have a polymorphic element handler.
   4752   Label try_poly_name;
   4753 
   4754   Register scratch2 = t0;
   4755 
   4756   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
   4757                              &miss);
   4758 
   4759   __ bind(&not_array);
   4760   // Is it generic?
   4761   __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
   4762   Handle<Code> megamorphic_stub =
   4763       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   4764   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   4765 
   4766   __ bind(&try_poly_name);
   4767   // We might have a name in feedback, and a fixed array in the next slot.
   4768   __ Branch(&miss, ne, key, Operand(feedback));
   4769   // If the name comparison succeeded, we know we have a fixed array with
   4770   // at least one map/handler pair.
   4771   __ SmiScale(scratch1, slot, kPointerSizeLog2);
   4772   __ Daddu(feedback, vector, Operand(scratch1));
   4773   __ ld(feedback,
   4774         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   4775   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
   4776                    &miss);
   4777 
   4778   __ bind(&miss);
   4779   KeyedStoreIC::GenerateMiss(masm);
   4780 
   4781   __ bind(&load_smi_map);
   4782   __ Branch(USE_DELAY_SLOT, &compare_map);
   4783   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
   4784 }
   4785 
   4786 
   4787 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   4788   if (masm->isolate()->function_entry_hook() != NULL) {
   4789     ProfileEntryHookStub stub(masm->isolate());
   4790     __ push(ra);
   4791     __ CallStub(&stub);
   4792     __ pop(ra);
   4793   }
   4794 }
   4795 
   4796 
   4797 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   4798   // The entry hook is a "push ra" instruction, followed by a call.
   4799   // Note: on MIPS a "push" is 2 instructions.
   4800   const int32_t kReturnAddressDistanceFromFunctionStart =
   4801       Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
   4802 
   4803   // This should contain all kJSCallerSaved registers.
   4804   const RegList kSavedRegs =
   4805      kJSCallerSaved |  // Caller saved registers.
   4806      s5.bit();         // Saved stack pointer.
   4807 
   4808   // We also save ra, so the count here is one higher than the mask indicates.
   4809   const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
   4810 
   4811   // Save all caller-save registers as this may be called from anywhere.
   4812   __ MultiPush(kSavedRegs | ra.bit());
   4813 
   4814   // Compute the function's address for the first argument.
   4815   __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
   4816 
   4817   // The caller's return address is above the saved temporaries.
   4818   // Grab that for the second argument to the hook.
   4819   __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
   4820 
   4821   // Align the stack if necessary.
   4822   int frame_alignment = masm->ActivationFrameAlignment();
   4823   if (frame_alignment > kPointerSize) {
   4824     __ mov(s5, sp);
   4825     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   4826     __ And(sp, sp, Operand(-frame_alignment));
   4827   }
   4828 
   4829   __ Dsubu(sp, sp, kCArgsSlotsSize);
   4830 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
   4831   int64_t entry_hook =
   4832       reinterpret_cast<int64_t>(isolate()->function_entry_hook());
   4833   __ li(t9, Operand(entry_hook));
   4834 #else
   4835   // Under the simulator we need to indirect the entry hook through a
   4836   // trampoline function at a known address.
   4837   // It additionally takes an isolate as a third parameter.
   4838   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
   4839 
   4840   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   4841   __ li(t9, Operand(ExternalReference(&dispatcher,
   4842                                       ExternalReference::BUILTIN_CALL,
   4843                                       isolate())));
   4844 #endif
   4845   // Call C function through t9 to conform ABI for PIC.
   4846   __ Call(t9);
   4847 
   4848   // Restore the stack pointer if needed.
   4849   if (frame_alignment > kPointerSize) {
   4850     __ mov(sp, s5);
   4851   } else {
   4852     __ Daddu(sp, sp, kCArgsSlotsSize);
   4853   }
   4854 
   4855   // Also pop ra to get Ret(0).
   4856   __ MultiPop(kSavedRegs | ra.bit());
   4857   __ Ret();
   4858 }
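
        // Sketch of the two address computations above: the probed function starts
        // a fixed distance before the return address found in ra, and the stack is
        // aligned by masking with the negated power-of-two alignment, which is
        // what And(sp, sp, Operand(-frame_alignment)) does.
        static inline uint64_t ExampleFunctionStart(uint64_t ra_value,
                                                    int32_t distance) {
          return ra_value - static_cast<uint64_t>(distance);
        }
        static inline uint64_t ExampleAlignDown(uint64_t sp, uint64_t alignment) {
          return sp & ~(alignment - 1);  // Same as sp & -alignment.
        }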
   4859 
   4860 
   4861 template<class T>
   4862 static void CreateArrayDispatch(MacroAssembler* masm,
   4863                                 AllocationSiteOverrideMode mode) {
   4864   if (mode == DISABLE_ALLOCATION_SITES) {
   4865     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
   4866     __ TailCallStub(&stub);
   4867   } else if (mode == DONT_OVERRIDE) {
   4868     int last_index = GetSequenceIndexFromFastElementsKind(
   4869         TERMINAL_FAST_ELEMENTS_KIND);
   4870     for (int i = 0; i <= last_index; ++i) {
   4871       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4872       T stub(masm->isolate(), kind);
   4873       __ TailCallStub(&stub, eq, a3, Operand(kind));
   4874     }
   4875 
   4876     // If we reached this point there is a problem.
   4877     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4878   } else {
   4879     UNREACHABLE();
   4880   }
   4881 }
   4882 
   4883 
   4884 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   4885                                            AllocationSiteOverrideMode mode) {
   4886   // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
   4887   // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
   4888   // a0 - number of arguments
   4889   // a1 - constructor?
   4890   // sp[0] - last argument
   4891   Label normal_sequence;
   4892   if (mode == DONT_OVERRIDE) {
   4893     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   4894     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   4895     STATIC_ASSERT(FAST_ELEMENTS == 2);
   4896     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   4897     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
   4898     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
   4899 
   4900     // is the low bit set? If so, we are holey and that is good.
   4901     __ And(at, a3, Operand(1));
   4902     __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
   4903   }
   4904   // look at the first argument
   4905   __ ld(a5, MemOperand(sp, 0));
   4906   __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
   4907 
   4908   if (mode == DISABLE_ALLOCATION_SITES) {
   4909     ElementsKind initial = GetInitialFastElementsKind();
   4910     ElementsKind holey_initial = GetHoleyElementsKind(initial);
   4911 
   4912     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
   4913                                                   holey_initial,
   4914                                                   DISABLE_ALLOCATION_SITES);
   4915     __ TailCallStub(&stub_holey);
   4916 
   4917     __ bind(&normal_sequence);
   4918     ArraySingleArgumentConstructorStub stub(masm->isolate(),
   4919                                             initial,
   4920                                             DISABLE_ALLOCATION_SITES);
   4921     __ TailCallStub(&stub);
   4922   } else if (mode == DONT_OVERRIDE) {
   4923     // We are going to create a holey array, but our kind is non-holey.
   4924     // Fix kind and retry (only if we have an allocation site in the slot).
   4925     __ Daddu(a3, a3, Operand(1));
   4926 
   4927     if (FLAG_debug_code) {
   4928       __ ld(a5, FieldMemOperand(a2, 0));
   4929       __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   4930       __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
   4931     }
   4932 
   4933     // Save the resulting elements kind in type info. We can't just store a3
   4934     // in the AllocationSite::transition_info field because elements kind is
   4935     // restricted to a portion of the field; the upper bits must be left alone.
   4936     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   4937     __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
   4938     __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
   4939     __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
   4940 
   4941 
   4942     __ bind(&normal_sequence);
   4943     int last_index = GetSequenceIndexFromFastElementsKind(
   4944         TERMINAL_FAST_ELEMENTS_KIND);
   4945     for (int i = 0; i <= last_index; ++i) {
   4946       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4947       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
   4948       __ TailCallStub(&stub, eq, a3, Operand(kind));
   4949     }
   4950 
   4951     // If we reached this point there is a problem.
   4952     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4953   } else {
   4954     UNREACHABLE();
   4955   }
   4956 }
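
        // Sketch of the packed-to-holey fix-up above: the fast elements kinds are
        // numbered so that the holey variant is the packed one plus one (see the
        // STATIC_ASSERTs), so incrementing the kind register switches kinds, e.g.
        // FAST_SMI_ELEMENTS (0) becomes FAST_HOLEY_SMI_ELEMENTS (1).
        static inline int ExampleToHoleyKind(int packed_kind) {
          return packed_kind + 1;
        }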
   4957 
   4958 
   4959 template<class T>
   4960 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
   4961   int to_index = GetSequenceIndexFromFastElementsKind(
   4962       TERMINAL_FAST_ELEMENTS_KIND);
   4963   for (int i = 0; i <= to_index; ++i) {
   4964     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4965     T stub(isolate, kind);
   4966     stub.GetCode();
   4967     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
   4968       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
   4969       stub1.GetCode();
   4970     }
   4971   }
   4972 }
   4973 
   4974 
   4975 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
   4976   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
   4977       isolate);
   4978   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
   4979       isolate);
   4980   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
   4981       isolate);
   4982 }
   4983 
   4984 
   4985 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
   4986     Isolate* isolate) {
   4987   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   4988   for (int i = 0; i < 2; i++) {
   4989     // For internal arrays we only need a few things.
   4990     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
   4991     stubh1.GetCode();
   4992     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
   4993     stubh2.GetCode();
   4994     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
   4995     stubh3.GetCode();
   4996   }
   4997 }
   4998 
   4999 
   5000 void ArrayConstructorStub::GenerateDispatchToArrayStub(
   5001     MacroAssembler* masm,
   5002     AllocationSiteOverrideMode mode) {
   5003   if (argument_count() == ANY) {
   5004     Label not_zero_case, not_one_case;
   5005     __ And(at, a0, a0);
   5006     __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
   5007     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   5008 
   5009     __ bind(&not_zero_case);
   5010     __ Branch(&not_one_case, gt, a0, Operand(1));
   5011     CreateArrayDispatchOneArgument(masm, mode);
   5012 
   5013     __ bind(&not_one_case);
   5014     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   5015   } else if (argument_count() == NONE) {
   5016     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   5017   } else if (argument_count() == ONE) {
   5018     CreateArrayDispatchOneArgument(masm, mode);
   5019   } else if (argument_count() == MORE_THAN_ONE) {
   5020     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   5021   } else {
   5022     UNREACHABLE();
   5023   }
   5024 }
   5025 
   5026 
   5027 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   5028   // ----------- S t a t e -------------
   5029   //  -- a0 : argc (only if argument_count() == ANY)
   5030   //  -- a1 : constructor
   5031   //  -- a2 : AllocationSite or undefined
   5032   //  -- a3 : new target
   5033   //  -- sp[0] : last argument
   5034   // -----------------------------------
   5035 
   5036   if (FLAG_debug_code) {
   5037     // The array construct code is only set for the global and natives
   5038     // built-in Array functions, which always have maps.
   5039 
   5040     // Initial map for the builtin Array function should be a map.
   5041     __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
   5042     // The Smi test below catches both a NULL pointer and a Smi.
   5043     __ SmiTst(a4, at);
   5044     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
   5045         at, Operand(zero_reg));
   5046     __ GetObjectType(a4, a4, a5);
   5047     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
   5048         a5, Operand(MAP_TYPE));
   5049 
   5050     // We should either have undefined in a2 or a valid AllocationSite
   5051     __ AssertUndefinedOrAllocationSite(a2, a4);
   5052   }
   5053 
   5054   // Enter the context of the Array function.
   5055   __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   5056 
   5057   Label subclassing;
   5058   __ Branch(&subclassing, ne, a1, Operand(a3));
   5059 
   5060   Label no_info;
   5061   // Get the elements kind and case on that.
   5062   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   5063   __ Branch(&no_info, eq, a2, Operand(at));
   5064 
   5065   __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
   5066   __ SmiUntag(a3);
   5067   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   5068   __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
   5069   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
   5070 
   5071   __ bind(&no_info);
   5072   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
   5073 
   5074   // Subclassing.
   5075   __ bind(&subclassing);
   5076   switch (argument_count()) {
   5077     case ANY:
   5078     case MORE_THAN_ONE:
   5079       __ dsll(at, a0, kPointerSizeLog2);
   5080       __ Daddu(at, sp, at);
   5081       __ sd(a1, MemOperand(at));
   5082       __ li(at, Operand(3));
   5083       __ Daddu(a0, a0, at);
   5084       break;
   5085     case NONE:
   5086       __ sd(a1, MemOperand(sp, 0 * kPointerSize));
   5087       __ li(a0, Operand(3));
   5088       break;
   5089     case ONE:
   5090       __ sd(a1, MemOperand(sp, 1 * kPointerSize));
   5091       __ li(a0, Operand(4));
   5092       break;
   5093   }
   5094   __ Push(a3, a2);
   5095   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
   5096 }
   5097 
   5098 
   5099 void InternalArrayConstructorStub::GenerateCase(
   5100     MacroAssembler* masm, ElementsKind kind) {
   5101 
   5102   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   5103   __ TailCallStub(&stub0, lo, a0, Operand(1));
   5104 
   5105   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
   5106   __ TailCallStub(&stubN, hi, a0, Operand(1));
   5107 
   5108   if (IsFastPackedElementsKind(kind)) {
   5109     // We might need to create a holey array;
   5110     // look at the first argument.
   5111     __ ld(at, MemOperand(sp, 0));
   5112 
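    // A nonzero length argument produces an array with uninitialized
    // elements, which requires the holey variant of the elements kind.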
    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // The initial map for the builtin Array function should be a map.
    __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // SmiTst yields zero for both NULL and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, a4);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        a4, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3; the byte load fetches exactly the
  // field we need, and the decode below extracts the elements kind from it.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = cp;
  Register slot_reg = a2;
  Register result_reg = v0;
  Label slow_case;

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = result_reg;
  }

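  // Scale the slot index by the pointer size; ContextMemOperand with index 0
  // then folds in the context header offset.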
  // Load the PropertyCell value at the specified slot.
  __ dsll(at, slot_reg, kPointerSizeLog2);
  __ Daddu(at, at, Operand(context_reg));
  __ ld(result_reg, ContextMemOperand(at, 0));
  __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));

  // Check that value is not the_hole.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow_case, eq, result_reg, Operand(at));
  __ Ret();

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
  __ Push(slot_reg);
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}


void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = cp;
  Register slot_reg = a2;
  Register value_reg = a0;
  Register cell_reg = a4;
  Register cell_value_reg = a5;
  Register cell_details_reg = a6;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
  }

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = cell_reg;
  }

  // Load the PropertyCell at the specified slot.
  __ dsll(at, slot_reg, kPointerSizeLog2);
  __ Daddu(at, at, Operand(context_reg));
  __ ld(cell_reg, ContextMemOperand(at, 0));

  // Load PropertyDetails for the cell (only the cell_type, kind and the
  // READ_ONLY attribute are needed).
  __ ld(cell_details_reg,
        FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details_reg);
  __ And(cell_details_reg, cell_details_reg,
         PropertyDetails::PropertyCellTypeField::kMask |
             PropertyDetails::KindField::kMask |
             PropertyDetails::kAttributesReadOnlyMask);

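  // Fast paths, in order: a mutable data cell takes any value (with a write
  // barrier for heap objects); other cell types accept a store of the value
  // they already hold; a ConstantType data cell also accepts a new value of
  // the same representation. Everything else goes to the runtime.
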
  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ Branch(&not_mutable_data, ne, cell_details_reg,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kMutable) |
                    PropertyDetails::KindField::encode(kData)));
  __ JumpIfSmi(value_reg, &fast_smi_case);
  __ bind(&fast_heapobject_case);
  __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
                      cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // RecordWriteField clobbers the value register, so reload it; the load
  // below executes in the Ret delay slot.
  __ Ret(USE_DELAY_SLOT);
  __ ld(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ bind(&not_mutable_data);

  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ ld(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
  // Make sure the PropertyCell is not marked READ_ONLY.
  __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
  __ Branch(&slow_case, ne, at, Operand(zero_reg));
  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ Branch(&done, eq, cell_details_reg,
              Operand(PropertyDetails::PropertyCellTypeField::encode(
                          PropertyCellType::kConstant) |
                      PropertyDetails::KindField::encode(kData)));
    __ Branch(&done, eq, cell_details_reg,
              Operand(PropertyDetails::PropertyCellTypeField::encode(
                          PropertyCellType::kConstantType) |
                      PropertyDetails::KindField::encode(kData)));
    __ Check(eq, kUnexpectedValue, cell_details_reg,
             Operand(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kUndefined) |
                     PropertyDetails::KindField::encode(kData)));
    __ bind(&done);
  }
  __ Ret();
  __ bind(&not_same_value);

  // Check if PropertyCell contains data with constant type (and is not
  // READ_ONLY).
  __ Branch(&slow_case, ne, cell_details_reg,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kConstantType) |
                    PropertyDetails::KindField::encode(kData)));

  // Now either both old and new values must be SMIs or both must be heap
  // objects with the same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value_reg, &value_is_heap_object);
  __ JumpIfNotSmi(cell_value_reg, &slow_case);
  // Old and new values are SMIs, no need for a write barrier here; the store
  // executes in the Ret delay slot.
  __ bind(&fast_smi_case);
  __ Ret(USE_DELAY_SLOT);
  __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
  __ bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value_reg, &slow_case);
  Register cell_value_map_reg = cell_value_reg;
  __ ld(cell_value_map_reg,
        FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
  // Load the new value's map before comparing; passing a MemOperand straight
  // to Branch would silently decay to its base register (MemOperand derives
  // from Operand) and compare against the value itself instead of its map.
  __ ld(at, FieldMemOperand(value_reg, HeapObject::kMapOffset));
  __ Branch(&fast_heapobject_case, eq, cell_value_map_reg, Operand(at));

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
  __ Push(slot_reg, value_reg);
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy);
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  DCHECK(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


// Calls an API function. Allocates a HandleScope, extracts the returned
// value from the handle, and propagates exceptions. Restores the context.
// stack_space is the space to be unwound on exit (it includes the JS call
// arguments space and the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(a1) || function_address.is(a2));

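  // If the profiler is active, route the call through a thunk so the
  // profiler can record the callback's address; either way t9 ends up
  // holding the call target.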
  Label profiler_disabled;
  Label end_profiler_check;
  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ lb(t9, MemOperand(t9, 0));
  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // Additional parameter is the address of the actual callback.
  __ li(t9, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ mov(t9, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
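  // s3 holds the base address; s0 = next, s1 = limit, s2 = level. Callee-save
  // registers are used so the values survive the C call.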
  __ li(s3, Operand(next_address));
  __ ld(s0, MemOperand(s3, kNextOffset));
  __ ld(s1, MemOperand(s3, kLimitOffset));
  __ lw(s2, MemOperand(s3, kLevelOffset));
  __ Addu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ ld(v0, return_value_operand);
  __ bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ sd(s0, MemOperand(s3, kNextOffset));
  if (__ emit_debug_code()) {
    __ lw(a1, MemOperand(s3, kLevelOffset));
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  __ Subu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
  __ ld(at, MemOperand(s3, kLimitOffset));
  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ ld(cp, *context_restore_operand);
  }
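  // The amount of stack to unwind is either read back from the frame (when
  // the argument count is only known at run time) or a compile-time constant.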
  if (stack_space_offset != kInvalidStackOffset) {
    DCHECK(kCArgsSlotsSize == 0);
    __ ld(s0, MemOperand(sp, stack_space_offset));
  } else {
    __ li(s0, Operand(stack_space));
  }
  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
                    stack_space_offset != kInvalidStackOffset);

  // Check if the function scheduled an exception.
  __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ ld(a5, MemOperand(at));
  __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));

  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ sd(s1, MemOperand(s3, kLimitOffset));
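  // Preserve the return value in callee-saved s0 across the C call.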
  __ mov(s0, v0);
  __ mov(a0, v0);
  __ PrepareCallCFunction(1, s1);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(v0, s0);
  __ jmp(&leave_exit_frame);
}


static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                      const ParameterCount& argc,
                                      bool return_first_arg,
                                      bool call_data_undefined) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- a4                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- a3                  : number of arguments if argc is a register
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8]  : first argument
  //  -- sp[argc * 8]        : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = a4;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);

  DCHECK(argc.is_immediate() || a3.is(argc.reg()));

  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  // Load context from callee.
  __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the FunctionCallbackInfo structure in the exit frame's extra
  // stack space, since it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // The struct lives one slot above sp, past the reserved return-address
  // slot.
  __ Daddu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
  if (argc.is_immediate()) {
    // FunctionCallbackInfo::values_
    __ Daddu(at, scratch,
             Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
    __ sd(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    // Stored as an int field; the n64 ABI keeps 32-bit integers within an
    // on-stack struct left-justified.
    __ li(at, Operand(argc.immediate()));
    __ sw(at, MemOperand(a0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_ = 0
    __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
  } else {
    // FunctionCallbackInfo::values_
    __ dsll(at, argc.reg(), kPointerSizeLog2);
    __ Daddu(at, at, scratch);
    __ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
    __ sd(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    // Stored as an int field; the n64 ABI keeps 32-bit integers within an
    // on-stack struct left-justified.
    __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
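    // The is_construct_call_ slot below doubles as the number of bytes to
    // unwind, (argc + FCA::kArgsLength + 1) pointers; it is read back after
    // the call via stack_space_offset since argc is only known at run time.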
    // FunctionCallbackInfo::is_construct_call_
    __ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
    __ dsll(at, argc.reg(), kPointerSizeLog2);
    __ sw(at, MemOperand(a0, 2 * kPointerSize + kIntSize));
  }

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (return_first_arg) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  int32_t stack_space_offset = 4 * kPointerSize;
  if (argc.is_immediate()) {
    stack_space = argc.immediate() + FCA::kArgsLength + 1;
    stack_space_offset = kInvalidStackOffset;
  }
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_offset, return_value_operand,
                           &context_restore_operand);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
                            call_data_undefined);
}


void CallApiAccessorStub::Generate(MacroAssembler* masm) {
  bool is_store = this->is_store();
  int argc = this->argc();
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
                            call_data_undefined);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                  : name
  //  -- sp[8 - kArgsLength*8]  : PropertyCallbackArguments object
  //  -- ...
  //  -- a2                     : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(a2));

  __ mov(a0, sp);  // a0 = Handle<Name>
  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create PropertyAccessorInfo instance on the stack above the exit frame
  // with a1 (internal::Object** args_) as the data.
  __ sd(a1, MemOperand(sp, 1 * kPointerSize));
  __ Daddu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&

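  // Unwind the name handle plus the PropertyCallbackArguments slots on exit.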
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kInvalidStackOffset,
                           MemOperand(fp, 6 * kPointerSize), NULL);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64