// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/mips/code-stubs-mips.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
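  // In effect (a sketch of the convention this stub appears to follow):
  // a0 holds argc, a1 the constructor and a2 the allocation site. The slot
  // at sp + argc * kPointerSize (the receiver slot) is overwritten with the
  // constructor, the constructor and allocation site are then pushed, and
  // argc is adjusted so that Runtime::kNewArray sees the three extra values.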
  __ sll(t9, a0, kPointerSizeLog2);
  __ Addu(t9, sp, t9);
  __ sw(a1, MemOperand(t9, 0));
  __ Push(a1);
  __ Push(a2);
  __ Addu(a0, a0, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           a0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments, adjust sp.
    __ Subu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sw(descriptor.GetRegisterParameter(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);

  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(scratch, scratch,
           kFCSROverflowFlagMask | kFCSRUnderflowFlagMask |
               kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

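  // Sketch of the slow path below: a double with biased exponent e and
  // 52-bit mantissa m represents (-1)^sign * 1.m * 2^(e - kExponentBias).
  // The code rebuilds the low 32 bits of the integer part by setting the
  // implicit 1 above the mantissa, shifting the two mantissa words into
  // position according to the exponent, and finally negating the result if
  // the sign bit was set.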
  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
      MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
      MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32-bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
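  // Negate the magnitude unconditionally, then let Movz keep the positive
  // value when the saved sign bits (now in scratch) are zero.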
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The operands are identical and the Smi fast case was handled already,
  // so neither of them is a Smi. If it's not a heap number, then return
  // equal.
  __ GetObjectType(a0, t4, t4);
  if (cc == less || cc == greater) {
    // Call runtime on identical JSObjects.
    __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
  } else {
    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(t2));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(t3, t2, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, t3, Operand(t2));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
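  // A smi stores the integer shifted left by kSmiTagSize (1 on 32-bit
  // targets), so the arithmetic shift right below recovers the untagged
  // value; e.g. the smi bit pattern 0x6 untags to the integer 3.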
  __ sra(at, rhs, kSmiTagSize);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ sra(at, lhs, kSmiTagSize);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(a2, a2, Operand(a3));
  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is the object type of lhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&object_test);
  __ lw(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lw(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
  __ Branch(&undetectable, ne, at, Operand(zero_reg));
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, ne, at, Operand(zero_reg));

  __ GetInstanceType(a2, a2);
  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&undetectable);
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, eq, at, Operand(zero_reg));

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ GetInstanceType(a2, a2);
  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));

  __ bind(&return_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));  // In delay slot.
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 and a0 are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
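  // After untagging, both operands fit in 31 bits, so the subtraction below
  // cannot overflow and its sign directly encodes the comparison result.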
  __ sra(a1, a1, 1);
  __ sra(a0, a0, 1);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ And(t2, lhs, Operand(rhs));
  __ JumpIfNotSmi(t2, &not_smis, t0);
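  // The AND of the two words has a clear (smi) tag bit iff at least one
  // operand is a smi, so reaching not_smis means both are heap objects.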
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // In case 3 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12 and f14 hold the double representations of the left hand side and
  // the right hand side, respectively.
  Label nan;
  __ li(t0, Operand(LESS));
  __ li(t1, Operand(GREATER));
  __ li(t2, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
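  // Pre-r6 MIPS compares doubles via the FP condition code (c.cond.fmt plus
  // movt/movf); r6 removed those instructions in favor of compare-and-branch
  // forms, hence the two paths below.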
  if (!IsMipsArchVariant(kMips32r6)) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, t0);
    // Use the previous check to conditionally store the opposite condition
    // (GREATER) in v0. If rhs is equal to lhs, this will be corrected by the
    // next check.
    __ Movf(v0, t1);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, t2);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, t0);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, t2);  // Return EQUAL as result.

    __ mov(v0, t1);  // Return GREATER as result.
    __ bind(&skip);
  }

  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
                                                    t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  if (cc == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(cp);
      __ Call(strict() ? isolate()->builtins()->StrictEqual()
                       : isolate()->builtins()->Equal(),
              RelocInfo::CODE_TARGET);
      __ Pop(cp);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, v0, a0);  // In delay slot.
  } else {
    // Prepare for call to builtin. Push object pointers: lhs (a1) first,
    // rhs (a0) second.
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
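  // On entry the caller appears to have pushed its own return address before
  // calling this stub: ra points back into the caller and the caller's
  // original ra is on top of the stack. Restore the latter so the safepoint
  // registers are saved with the right ra, then resume in the caller via t9.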
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t5;
  const Register scratch2 = t3;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent, bail_out;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Subu(scratch, zero_reg, scratch);
  // Check whether Subu overflowed to a negative result (this happens only
  // when the input is MIN_INT).
  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
  __ bind(&positive_exponent);
  __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));

  Label while_true, no_carry, loop_end;
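  // Square-and-multiply: each iteration multiplies the result by the current
  // power of the base when the low exponent bit is set, then squares the
  // base and halves the exponent; e.g. x^5 = x * (x^2)^2.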
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ sra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it to the exponent value before bailing out.
  __ bind(&bail_out);
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}

bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // a2: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(s1, a2);
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ Lsa(s1, sp, a0, kPointerSizeLog2);
    __ Subu(s1, s1, kPointerSize);
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // s0: number of arguments  including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.
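  // (Under the MIPS o32 calling convention the caller reserves home space
  // for the four register arguments a0..a3 even though they are passed in
  // registers; that is the kCArgsSlotsSize reservation made in the branch
  // delay slot of the call below.)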

  __ AssertStackIsAligned();

  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  int result_stack_size;
  if (result_size() <= 2) {
    // a0 = argc, a1 = argv, a2 = isolate
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a1, s1);
    result_stack_size = 0;
  } else {
    DCHECK_EQ(3, result_size());
    // Allocate additional space for the result.
    result_stack_size =
        ((result_size() * kPointerSize) + frame_alignment_mask) &
        ~frame_alignment_mask;
    __ Subu(sp, sp, Operand(result_stack_size));

    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a2, s1);
    __ mov(a1, a0);
    __ mov(a0, sp);
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    int kNumInstructionsToJump = 4;
    Label find_ra;
    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    if (kArchVariant >= kMips32r6) {
      __ addiupc(ra, kNumInstructionsToJump + 1);
    } else {
      // This branch-and-link sequence is needed to find the current PC on mips
      // before r6, saved to the ra register.
      __ bal(&find_ra);  // bal exposes branch delay slot.
      __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
    }
    __ bind(&find_ra);

    // This spot was reserved in EnterExitFrame.
    __ sw(ra, MemOperand(sp, result_stack_size));
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    __ jalr(t9);
    // Set up sp in the delay slot.
    __ addiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }
  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ lw(a0, MemOperand(v0, 2 * kPointerSize));
    __ lw(v1, MemOperand(v0, 1 * kPointerSize));
    __ lw(v0, MemOperand(v0, 0 * kPointerSize));
  }
  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(t0, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, t0, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ lw(a2, MemOperand(a2));
    __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
    // Cannot use a Check here, as it attempts to generate a call into the
    // runtime.
    __ Branch(&okay, eq, t0, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // s0: still holds argc (callee-saved).
    argc = s0;
  }
  __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ lw(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ lw(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ lw(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ lw(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ lw(a2, MemOperand(a2));
  __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Addu(t9, a1, a2);
  __ Jump(t9);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  //
  // Stack:
  // 4 args slots
  // args

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);

  // Load argv in s0 register.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
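  // The offset skips what the prologue above pushed: kNumCalleeSaved GP
  // registers plus ra, then kNumCalleeSavedFPU double registers.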

  __ InitializeRootRegister();
  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));

  // We build an EntryFrame.
  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  StackFrame::Type marker = type();
  __ li(t2, Operand(StackFrame::TypeToMarker(marker)));
  __ li(t1, Operand(StackFrame::TypeToMarker(marker)));
  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ lw(t0, MemOperand(t0));
  __ Push(t3, t2, t1, t0);
  // Set up frame pointer for the frame to be pushed.
  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // caller fp          |
  // function slot      | entry frame
  // context slot       |
  // bad fp (0xff...f)  |
  // callee saved registers + ra
  // 4 args slots
  // args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
  __ lw(t2, MemOperand(t1));
  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
  __ sw(fp, MemOperand(t1));
  __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont);
  __ nop();   // Branch delay slot nop.
  __ bind(&non_outermost_js);
  __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(t0);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
  __ LoadRoot(v0, Heap::kExceptionRootIndex);
  __ b(&exit);  // b exposes branch delay slot.
  __ nop();   // Branch delay slot nop.

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the jmp(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // handler frame
  // entry frame
  // callee saved registers + ra
  // 4 args slots
  // args

  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ li(t0, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
    __ li(t0, Operand(entry));
  }
  __ lw(t9, MemOperand(t0));  // Deref address.

  // Call JSEntryTrampoline.
  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
  __ Call(t9);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // v0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(t1);
  __ Branch(&non_outermost_js_2, ne, t1,
            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
  __ sw(zero_reg, MemOperand(t1));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(t1);
  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ sw(t1, MemOperand(t0));

  // Reset the stack to the callee saved registers.
  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}

void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time, or if regexp entry in generated code is turned off by a runtime
  // switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
   1279   __ TailCallRuntime(Runtime::kRegExpExec);
   1280 #else  // V8_INTERPRETED_REGEXP
   1281 
   1282   // Stack frame on entry.
   1283   //  sp[0]: last_match_info (expected JSArray)
   1284   //  sp[4]: previous index
   1285   //  sp[8]: subject string
   1286   //  sp[12]: JSRegExp object
   1287 
   1288   const int kLastMatchInfoOffset = 0 * kPointerSize;
   1289   const int kPreviousIndexOffset = 1 * kPointerSize;
   1290   const int kSubjectOffset = 2 * kPointerSize;
   1291   const int kJSRegExpOffset = 3 * kPointerSize;
   1292 
   1293   Label runtime;
   1294   // Allocation of registers for this function. These are in callee save
   1295   // registers and will be preserved by the call to the native RegExp code, as
   1296   // this code is called using the normal C calling convention. When calling
   1297   // directly from generated code the native RegExp code will not do a GC and
   1298   // therefore the content of these registers are safe to use after the call.
   1299   // MIPS - using s0..s2, since we are not using CEntry Stub.
   1300   Register subject = s0;
   1301   Register regexp_data = s1;
   1302   Register last_match_info_elements = s2;
   1303 
   1304   // Ensure that a RegExp stack is allocated.
   1305   ExternalReference address_of_regexp_stack_memory_address =
   1306       ExternalReference::address_of_regexp_stack_memory_address(isolate());
   1307   ExternalReference address_of_regexp_stack_memory_size =
   1308       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   1309   __ li(a0, Operand(address_of_regexp_stack_memory_size));
   1310   __ lw(a0, MemOperand(a0, 0));
   1311   __ Branch(&runtime, eq, a0, Operand(zero_reg));
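          // A stack size of zero means no RegExp stack has been allocated yet; bail
          // out to the runtime in that case so the stack can be set up.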
   1312 
   1313   // Check that the first argument is a JSRegExp object.
   1314   __ lw(a0, MemOperand(sp, kJSRegExpOffset));
   1315   STATIC_ASSERT(kSmiTag == 0);
   1316   __ JumpIfSmi(a0, &runtime);
   1317   __ GetObjectType(a0, a1, a1);
   1318   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
   1319 
   1320   // Check that the RegExp has been compiled (data contains a fixed array).
   1321   __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
   1322   if (FLAG_debug_code) {
   1323     __ SmiTst(regexp_data, t0);
   1324     __ Check(nz,
   1325              kUnexpectedTypeForRegExpDataFixedArrayExpected,
   1326              t0,
   1327              Operand(zero_reg));
   1328     __ GetObjectType(regexp_data, a0, a0);
   1329     __ Check(eq,
   1330              kUnexpectedTypeForRegExpDataFixedArrayExpected,
   1331              a0,
   1332              Operand(FIXED_ARRAY_TYPE));
   1333   }
   1334 
   1335   // regexp_data: RegExp data (FixedArray)
   1336   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   1337   __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   1338   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
   1339 
   1340   // regexp_data: RegExp data (FixedArray)
   1341   // Check that the number of captures fit in the static offsets vector buffer.
   1342   __ lw(a2,
   1343          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   1344   // Check (number_of_captures + 1) * 2 <= offsets vector size
   1345   // Or          number_of_captures * 2 <= offsets vector size - 2
   1346   // Multiplying by 2 comes for free since a2 is smi-tagged.
   1347   STATIC_ASSERT(kSmiTag == 0);
   1348   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   1349   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   1350   __ Branch(
   1351       &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
   1352 
   1353   // Reset offset for possibly sliced string.
   1354   __ mov(t0, zero_reg);
   1355   __ lw(subject, MemOperand(sp, kSubjectOffset));
   1356   __ JumpIfSmi(subject, &runtime);
   1357   __ mov(a3, subject);  // Make a copy of the original subject string.
   1358   // subject: subject string
   1359   // a3: subject string
   1360   // regexp_data: RegExp data (FixedArray)
   1361   // Handle subject string according to its encoding and representation:
   1362   // (1) Sequential string?  If yes, go to (4).
   1363   // (2) Sequential or cons?  If not, go to (5).
   1364   // (3) Cons string.  If the string is flat, replace subject with first string
   1365   //     and go to (1). Otherwise bail out to runtime.
   1366   // (4) Sequential string.  Load regexp code according to encoding.
   1367   // (E) Carry on.
   1368   /// [...]
   1369 
   1370   // Deferred code at the end of the stub:
   1371   // (5) Long external string?  If not, go to (7).
   1372   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1373   //     Go to (4).
   1374   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1375   // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
   1376 
   1377   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
   1378       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
   1379 
   1380   __ bind(&check_underlying);
   1381   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   1382   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   1383 
   1384   // (1) Sequential string?  If yes, go to (4).
   1385   __ And(a1,
   1386          a0,
   1387          Operand(kIsNotStringMask |
   1388                  kStringRepresentationMask |
   1389                  kShortExternalStringMask));
   1390   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   1391   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
   1392 
   1393   // (2) Sequential or cons?  If not, go to (5).
   1394   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   1395   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   1396   STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   1397   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   1398   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   1399   // Go to (5).
   1400   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
   1401 
   1402   // (3) Cons string.  Check that it's flat.
   1403   // Replace subject with first string and reload instance type.
   1404   __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
   1405   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
   1406   __ Branch(&runtime, ne, a0, Operand(a1));
   1407   __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   1408   __ jmp(&check_underlying);
   1409 
   1410   // (4) Sequential string.  Load regexp code according to encoding.
   1411   __ bind(&seq_string);
   1412   // subject: sequential subject string (or look-alike, external string)
   1413   // a3: original subject string
   1414   // Load previous index and check range before a3 is overwritten.  We have to
   1415   // use a3 instead of subject here because subject might have been only made
   1416   // to look like a sequential string when it actually is an external string.
   1417   __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
   1418   __ JumpIfNotSmi(a1, &runtime);
   1419   __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
   1420   __ Branch(&runtime, ls, a3, Operand(a1));
   1421   __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
   1422 
   1423   STATIC_ASSERT(kStringEncodingMask == 8);
   1424   STATIC_ASSERT(kOneByteStringTag == 8);
   1425   STATIC_ASSERT(kTwoByteStringTag == 0);
   1426   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one-byte.
   1427   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
   1428   __ sra(a3, a0, 3);  // a3 is 1 for ASCII, 0 for UC16 (used below).
   1429   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   1430   __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
   1431 
   1432   // (E) Carry on.  String handling is done.
   1433   // t9: irregexp code
   1434   // Check that the irregexp code has been generated for the actual string
   1435   // encoding. If it has, the field contains a code object; otherwise it
   1436   // contains a smi (code flushing support).
   1437   __ JumpIfSmi(t9, &runtime);
   1438 
   1439   // a1: previous index
   1440   // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
   1441   // t9: code
   1442   // subject: Subject string
   1443   // regexp_data: RegExp data (FixedArray)
   1444   // All checks done. Now push arguments for native regexp code.
   1445   __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
   1446                       1, a0, a2);
   1447 
   1448   // Isolates: note we add an additional parameter here (isolate pointer).
   1449   const int kRegExpExecuteArguments = 9;
   1450   const int kParameterRegisters = 4;
   1451   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
   1452 
   1453   // Stack pointer now points to cell where return address is to be written.
   1454   // Arguments are before that on the stack or in registers, meaning we
   1455   // treat the return address as argument 5. Thus every argument after that
   1456   // needs to be shifted back by 1. Since DirectCEntryStub will handle
   1457   // allocating space for the c argument slots, we don't need to calculate
   1458   // that into the argument positions on the stack. This is how the stack will
   1459   // look (sp meaning the value of sp at this moment):
   1460   // [sp + 5] - Argument 9
   1461   // [sp + 4] - Argument 8
   1462   // [sp + 3] - Argument 7
   1463   // [sp + 2] - Argument 6
   1464   // [sp + 1] - Argument 5
   1465   // [sp + 0] - saved ra
   1466 
   1467   // Argument 9: Pass current isolate address.
   1468   // CFunctionArgumentOperand handles MIPS stack argument slots.
   1469   __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
   1470   __ sw(a0, MemOperand(sp, 5 * kPointerSize));
   1471 
   1472   // Argument 8: Indicate that this is a direct call from JavaScript.
   1473   __ li(a0, Operand(1));
   1474   __ sw(a0, MemOperand(sp, 4 * kPointerSize));
   1475 
   1476   // Argument 7: Start (high end) of backtracking stack memory area.
   1477   __ li(a0, Operand(address_of_regexp_stack_memory_address));
   1478   __ lw(a0, MemOperand(a0, 0));
   1479   __ li(a2, Operand(address_of_regexp_stack_memory_size));
   1480   __ lw(a2, MemOperand(a2, 0));
   1481   __ addu(a0, a0, a2);
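          // a0 = stack base + stack size, i.e. the high end of the backtrack stack.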
   1482   __ sw(a0, MemOperand(sp, 3 * kPointerSize));
   1483 
   1484   // Argument 6: Set the number of capture registers to zero to force global
   1485   // regexps to behave as non-global.  This does not affect non-global regexps.
   1486   __ mov(a0, zero_reg);
   1487   __ sw(a0, MemOperand(sp, 2 * kPointerSize));
   1488 
   1489   // Argument 5: static offsets vector buffer.
   1490   __ li(a0, Operand(
   1491         ExternalReference::address_of_static_offsets_vector(isolate())));
   1492   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
   1493 
   1494   // For arguments 4 and 3 get the string length, calculate the start of the
   1495   // string data and the shift of the index (0 for one-byte, 1 for two-byte).
   1496   __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   1497   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   1498   // Load the length from the original subject string from the previous stack
   1499   // frame. Therefore we have to use fp, which points exactly to two pointer
   1500   // sizes below the previous sp. (Because creating a new stack frame pushes
   1501   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
   1502   __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   1503   // If slice offset is not 0, load the length from the original sliced string.
   1504   // Argument 4, a3: End of string data
   1505   // Argument 3, a2: Start of string data
   1506   // Prepare start and end index of the input.
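          // At this point t2 holds the start of the string data, t0 the slice offset
          // (in characters, zero for non-sliced strings), a1 the untagged previous
          // index, and a3 the character-size shift (0 for one-byte, 1 for two-byte).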
   1507   __ sllv(t1, t0, a3);
   1508   __ addu(t0, t2, t1);
   1509   __ sllv(t1, a1, a3);
   1510   __ addu(a2, t0, t1);
   1511 
   1512   __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
   1513   __ sra(t2, t2, kSmiTagSize);
   1514   __ sllv(t1, t2, a3);
   1515   __ addu(a3, t0, t1);
   1516   // Argument 2 (a1): Previous index.
   1517   // Already there
   1518 
   1519   // Argument 1 (a0): Subject string.
   1520   __ mov(a0, subject);
   1521 
   1522   // Locate the code entry and call it.
   1523   __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
   1524   DirectCEntryStub stub(isolate());
   1525   stub.GenerateCall(masm, t9);
   1526 
   1527   __ LeaveExitFrame(false, no_reg, true);
   1528 
   1529   // v0: result
   1530   // subject: subject string (callee saved)
   1531   // regexp_data: RegExp data (callee saved)
   1532   // last_match_info_elements: Last match info elements (callee saved)
   1533   // Check the result.
   1534   Label success;
   1535   __ Branch(&success, eq, v0, Operand(1));
   1536   // We expect exactly one result since we force the called regexp to behave
   1537   // as non-global.
   1538   Label failure;
   1539   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
   1540   // If not an exception, it can only be a retry. Handle that in the runtime
   1540   // system.
   1541   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   1542   // Result must now be exception. If there is no pending exception already, a
   1543   // stack overflow (on the backtrack stack) was detected in RegExp code, but
   1544   // the exception has not been created yet. Handle that in the runtime system.
   1545   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   1546   __ li(a1, Operand(isolate()->factory()->the_hole_value()));
   1547   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1548                                       isolate())));
   1549   __ lw(v0, MemOperand(a2, 0));
   1550   __ Branch(&runtime, eq, v0, Operand(a1));
   1551 
   1552   // For exception, throw the exception again.
   1553   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
   1554 
   1555   __ bind(&failure);
   1556   // For failure and exception return null.
   1557   __ li(v0, Operand(isolate()->factory()->null_value()));
   1558   __ DropAndRet(4);
   1559 
   1560   // Process the result from the native regexp code.
   1561   __ bind(&success);
   1562   __ lw(a1,
   1563          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   1564   // Calculate number of capture registers (number_of_captures + 1) * 2.
   1565   // Multiplying by 2 comes for free since a1 is smi-tagged.
   1566   STATIC_ASSERT(kSmiTag == 0);
   1567   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   1568   __ Addu(a1, a1, Operand(2));  // a1 was a smi.
   1569 
   1570   // Check that the last match info is a FixedArray.
   1571   __ lw(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
   1572   __ JumpIfSmi(last_match_info_elements, &runtime);
   1573   // Check that the object has fast elements.
   1574   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   1575   __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
   1576   __ Branch(&runtime, ne, a0, Operand(at));
   1577   // Check that the last match info has space for the capture registers and the
   1578   // additional information.
   1579   __ lw(a0,
   1580         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   1581   __ Addu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
   1582   __ sra(at, a0, kSmiTagSize);
   1583   __ Branch(&runtime, gt, a2, Operand(at));
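          // a2 is the required length (capture registers plus overhead), at is the
          // untagged actual length; the branch above bails out if the match info
          // array is too small.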
   1584 
   1585   // a1: number of capture registers
   1586   // subject: subject string
   1587   // Store the capture count.
   1588   __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
   1589   __ sw(a2, FieldMemOperand(last_match_info_elements,
   1590                             RegExpMatchInfo::kNumberOfCapturesOffset));
   1591   // Store last subject and last input.
   1592   __ sw(subject, FieldMemOperand(last_match_info_elements,
   1593                                  RegExpMatchInfo::kLastSubjectOffset));
   1594   __ mov(a2, subject);
   1595   __ RecordWriteField(last_match_info_elements,
   1596                       RegExpMatchInfo::kLastSubjectOffset, subject, t3,
   1597                       kRAHasNotBeenSaved, kDontSaveFPRegs);
   1598   __ mov(subject, a2);
   1599   __ sw(subject, FieldMemOperand(last_match_info_elements,
   1600                                  RegExpMatchInfo::kLastInputOffset));
   1601   __ RecordWriteField(last_match_info_elements,
   1602                       RegExpMatchInfo::kLastInputOffset, subject, t3,
   1603                       kRAHasNotBeenSaved, kDontSaveFPRegs);
   1604 
   1605   // Get the static offsets vector filled by the native regexp code.
   1606   ExternalReference address_of_static_offsets_vector =
   1607       ExternalReference::address_of_static_offsets_vector(isolate());
   1608   __ li(a2, Operand(address_of_static_offsets_vector));
   1609 
   1610   // a1: number of capture registers
   1611   // a2: offsets vector
   1612   Label next_capture, done;
   1613   // Capture register counter starts from number of capture registers and
   1614   // counts down until wrapping after zero.
   1615   __ Addu(a0, last_match_info_elements,
   1616           Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
   1617   __ bind(&next_capture);
   1618   __ Subu(a1, a1, Operand(1));
   1619   __ Branch(&done, lt, a1, Operand(zero_reg));
   1620   // Read the value from the static offsets vector buffer.
   1621   __ lw(a3, MemOperand(a2, 0));
   1622   __ addiu(a2, a2, kPointerSize);
   1623   // Store the smi value in the last match info.
   1624   __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
   1625   __ sw(a3, MemOperand(a0, 0));
   1626   __ Branch(&next_capture, USE_DELAY_SLOT);
   1627   __ addiu(a0, a0, kPointerSize);  // In branch delay slot.
   1628 
   1629   __ bind(&done);
   1630 
   1631   // Return last match info.
   1632   __ mov(v0, last_match_info_elements);
   1633   __ DropAndRet(4);
   1634 
   1635   // Do the runtime call to execute the regexp.
   1636   __ bind(&runtime);
   1637   __ TailCallRuntime(Runtime::kRegExpExec);
   1638 
   1639   // Deferred code for string handling.
   1640   // (5) Long external string?  If not, go to (7).
   1641   __ bind(&not_seq_nor_cons);
   1642   // Go to (7).
   1643   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
   1644 
   1645   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1646   __ bind(&external_string);
   1647   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   1648   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   1649   if (FLAG_debug_code) {
   1650     // Assert that we do not have a cons or slice (indirect strings) here.
   1651     // Sequential strings have already been ruled out.
   1652     __ And(at, a0, Operand(kIsIndirectStringMask));
   1653     __ Assert(eq,
   1654               kExternalStringExpectedButNotFound,
   1655               at,
   1656               Operand(zero_reg));
   1657   }
   1658   __ lw(subject,
   1659         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   1660   // Move the pointer so that offset-wise, it looks like a sequential string.
   1661   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   1662   __ Subu(subject,
   1663           subject,
   1664           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   1665   __ jmp(&seq_string);    // Go to (4).
   1666 
   1667   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1668   __ bind(&not_long_external);
   1669   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   1670   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   1671   __ Branch(&runtime, ne, at, Operand(zero_reg));
   1672 
   1673   // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
   1674   Label thin_string;
   1675   __ Branch(&thin_string, eq, a1, Operand(kThinStringTag));
   1676   // Load offset into t0 and replace subject string with parent.
   1677   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   1678   __ sra(t0, t0, kSmiTagSize);
   1679   __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   1680   __ jmp(&check_underlying);  // Go to (1).
   1681 
   1682   __ bind(&thin_string);
   1683   __ lw(subject, FieldMemOperand(subject, ThinString::kActualOffset));
   1684   __ jmp(&check_underlying);  // Go to (1).
   1685 #endif  // V8_INTERPRETED_REGEXP
   1686 }
   1687 
   1688 
   1689 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
   1690   // a0 : number of arguments to the construct function
   1691   // a2 : feedback vector
   1692   // a3 : slot in feedback vector (Smi)
   1693   // a1 : the function to call
   1694   FrameScope scope(masm, StackFrame::INTERNAL);
   1695   const RegList kSavedRegs = 1 << 4 |  // a0
   1696                              1 << 5 |  // a1
   1697                              1 << 6 |  // a2
   1698                              1 << 7 |  // a3
   1699                              1 << cp.code();
   1700 
   1701   // Number-of-arguments register must be smi-tagged to call out.
   1702   __ SmiTag(a0);
   1703   __ MultiPush(kSavedRegs);
   1704 
   1705   __ CallStub(stub);
   1706 
   1707   __ MultiPop(kSavedRegs);
   1708   __ SmiUntag(a0);
   1709 }
   1710 
   1711 
   1712 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   1713   // Cache the called function in a feedback vector slot.  Cache states
   1714   // are uninitialized, monomorphic (indicated by a JSFunction), and
   1715   // megamorphic.
   1716   // a0 : number of arguments to the construct function
   1717   // a1 : the function to call
   1718   // a2 : feedback vector
   1719   // a3 : slot in feedback vector (Smi)
   1720   Label initialize, done, miss, megamorphic, not_array_function;
   1721 
   1722   DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
   1723             masm->isolate()->heap()->megamorphic_symbol());
   1724   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
   1725             masm->isolate()->heap()->uninitialized_symbol());
   1726 
   1727   // Load the cache state into t2.
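          // The slot (a3) is a smi, so shifting it left by
          // kPointerSizeLog2 - kSmiTagSize converts it into a byte offset into the
          // feedback vector.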
   1728   __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   1729   __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
   1730 
   1731   // A monomorphic cache hit or an already megamorphic state: invoke the
   1732   // function without changing the state.
   1733   // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
   1734   // this position in a symbol (see static asserts in feedback-vector.h).
   1735   Label check_allocation_site;
   1736   Register feedback_map = t1;
   1737   Register weak_value = t4;
   1738   __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
   1739   __ Branch(&done, eq, a1, Operand(weak_value));
   1740   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   1741   __ Branch(&done, eq, t2, Operand(at));
   1742   __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
   1743   __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
   1744   __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
   1745 
   1746   // If the weak cell is cleared, we have a new chance to become monomorphic.
   1747   __ JumpIfSmi(weak_value, &initialize);
   1748   __ jmp(&megamorphic);
   1749 
   1750   __ bind(&check_allocation_site);
   1751   // If we came here, we need to see if we are the array function.
   1752   // If we didn't have a matching function, and we didn't find the megamorph
   1753   // sentinel, then we have in the slot either some other function or an
   1754   // AllocationSite.
   1755   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   1756   __ Branch(&miss, ne, feedback_map, Operand(at));
   1757 
   1758   // Make sure the function is the Array() function
   1759   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
   1760   __ Branch(&megamorphic, ne, a1, Operand(t2));
   1761   __ jmp(&done);
   1762 
   1763   __ bind(&miss);
   1764 
   1765   // A monomorphic miss (i.e., the cache is not uninitialized) goes
   1766   // megamorphic.
   1767   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
   1768   __ Branch(&initialize, eq, t2, Operand(at));
   1769   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   1770   // write-barrier is needed.
   1771   __ bind(&megamorphic);
   1772   __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   1773   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   1774   __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
   1775   __ jmp(&done);
   1776 
   1777   // An uninitialized cache is patched with the function.
   1778   __ bind(&initialize);
   1779   // Make sure the function is the Array() function.
   1780   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
   1781   __ Branch(&not_array_function, ne, a1, Operand(t2));
   1782 
   1783   // The target function is the Array constructor.
   1784   // Create an AllocationSite if we don't already have one and store it in
   1785   // the slot.
   1786   CreateAllocationSiteStub create_stub(masm->isolate());
   1787   CallStubInRecordCallTarget(masm, &create_stub);
   1788   __ Branch(&done);
   1789 
   1790   __ bind(&not_array_function);
   1791   CreateWeakCellStub weak_cell_stub(masm->isolate());
   1792   CallStubInRecordCallTarget(masm, &weak_cell_stub);
   1793 
   1794   __ bind(&done);
   1795 
   1796   // Increment the call count for all function calls.
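          // The call count is kept in the vector element directly after the feedback
          // cell, hence the extra kPointerSize in the offset below.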
   1797   __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   1798   __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   1799   __ Addu(t0, t0, Operand(Smi::FromInt(1)));
   1800   __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   1801 }
   1802 
   1803 
   1804 void CallConstructStub::Generate(MacroAssembler* masm) {
   1805   // a0 : number of arguments
   1806   // a1 : the function to call
   1807   // a2 : feedback vector
   1808   // a3 : slot in feedback vector (Smi, for RecordCallTarget)
   1809 
   1810   Label non_function;
   1811   // Check that the function is not a smi.
   1812   __ JumpIfSmi(a1, &non_function);
   1813   // Check that the function is a JSFunction.
   1814   __ GetObjectType(a1, t1, t1);
   1815   __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
   1816 
   1817   GenerateRecordCallTarget(masm);
   1818 
   1819   __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   1820   Label feedback_register_initialized;
   1821   // Put the AllocationSite from the feedback vector into a2, or undefined.
   1822   __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
   1823   __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
   1824   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   1825   __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
   1826   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
   1827   __ bind(&feedback_register_initialized);
   1828 
   1829   __ AssertUndefinedOrAllocationSite(a2, t1);
   1830 
   1831   // Pass function as new target.
   1832   __ mov(a3, a1);
   1833 
   1834   // Tail call to the function-specific construct stub (still in the caller
   1835   // context at this point).
   1836   __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   1837   __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
   1838   __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
   1839   __ Jump(at);
   1840 
   1841   __ bind(&non_function);
   1842   __ mov(a3, a1);
   1843   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   1844 }
   1845 
   1846 // StringCharCodeAtGenerator.
   1847 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   1848   DCHECK(!t0.is(index_));
   1849   DCHECK(!t0.is(result_));
   1850   DCHECK(!t0.is(object_));
   1851   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
   1852     // If the receiver is a smi, trigger the non-string case.
   1853     __ JumpIfSmi(object_, receiver_not_string_);
   1854 
   1855     // Fetch the instance type of the receiver into result register.
   1856     __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   1857     __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   1858     // If the receiver is not a string, trigger the non-string case.
   1859     __ And(t0, result_, Operand(kIsNotStringMask));
   1860     __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
   1861   }
   1862 
   1863   // If the index is non-smi trigger the non-smi case.
   1864   __ JumpIfNotSmi(index_, &index_not_smi_);
   1865 
   1866   __ bind(&got_smi_index_);
   1867 
   1868   // Check for index out of range.
   1869   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
   1870   __ Branch(index_out_of_range_, ls, t0, Operand(index_));
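          // The unsigned comparison above also rules out negative indices: a negative
          // smi looks like a large unsigned value.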
   1871 
   1872   __ sra(index_, index_, kSmiTagSize);
   1873 
   1874   StringCharLoadGenerator::Generate(masm,
   1875                                     object_,
   1876                                     index_,
   1877                                     result_,
   1878                                     &call_runtime_);
   1879 
   1880   __ sll(result_, result_, kSmiTagSize);
   1881   __ bind(&exit_);
   1882 }
   1883 
   1884 
   1885 void StringCharCodeAtGenerator::GenerateSlow(
   1886     MacroAssembler* masm, EmbedMode embed_mode,
   1887     const RuntimeCallHelper& call_helper) {
   1888   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   1889 
   1890   // Index is not a smi.
   1891   __ bind(&index_not_smi_);
   1892   // If index is a heap number, try converting it to an integer.
   1893   __ CheckMap(index_,
   1894               result_,
   1895               Heap::kHeapNumberMapRootIndex,
   1896               index_not_number_,
   1897               DONT_DO_SMI_CHECK);
   1898   call_helper.BeforeCall(masm);
   1899   // Consumed by runtime conversion function:
   1900   if (embed_mode == PART_OF_IC_HANDLER) {
   1901     __ Push(LoadWithVectorDescriptor::VectorRegister(),
   1902             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   1903   } else {
   1904     __ Push(object_, index_);
   1905   }
   1906   __ CallRuntime(Runtime::kNumberToSmi);
   1907 
   1908   // Save the conversion result before the pop instructions below
   1909   // have a chance to overwrite it.
   1910   __ Move(index_, v0);
   1911   if (embed_mode == PART_OF_IC_HANDLER) {
   1912     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
   1913            LoadWithVectorDescriptor::SlotRegister(), object_);
   1914   } else {
   1915     __ pop(object_);
   1916   }
   1917   // Reload the instance type.
   1918   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   1919   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   1920   call_helper.AfterCall(masm);
   1921   // If index is still not a smi, it must be out of range.
   1922   __ JumpIfNotSmi(index_, index_out_of_range_);
   1923   // Otherwise, return to the fast path.
   1924   __ Branch(&got_smi_index_);
   1925 
   1926   // Call runtime. We get here when the receiver is a string and the
   1927   // index is a number, but the code for getting the actual character
   1928   // is too complex (e.g., when the string needs to be flattened).
   1929   __ bind(&call_runtime_);
   1930   call_helper.BeforeCall(masm);
   1931   __ sll(index_, index_, kSmiTagSize);
   1932   __ Push(object_, index_);
   1933   __ CallRuntime(Runtime::kStringCharCodeAtRT);
   1934 
   1935   __ Move(result_, v0);
   1936 
   1937   call_helper.AfterCall(masm);
   1938   __ jmp(&exit_);
   1939 
   1940   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   1941 }
   1942 
   1943 void StringHelper::GenerateFlatOneByteStringEquals(
   1944     MacroAssembler* masm, Register left, Register right, Register scratch1,
   1945     Register scratch2, Register scratch3) {
   1946   Register length = scratch1;
   1947 
   1948   // Compare lengths.
   1949   Label strings_not_equal, check_zero_length;
   1950   __ lw(length, FieldMemOperand(left, String::kLengthOffset));
   1951   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
   1952   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
   1953   __ bind(&strings_not_equal);
   1954   DCHECK(is_int16(NOT_EQUAL));
   1955   __ Ret(USE_DELAY_SLOT);
   1956   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
   1957 
   1958   // Check if the length is zero.
   1959   Label compare_chars;
   1960   __ bind(&check_zero_length);
   1961   STATIC_ASSERT(kSmiTag == 0);
   1962   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
   1963   DCHECK(is_int16(EQUAL));
   1964   __ Ret(USE_DELAY_SLOT);
   1965   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   1966 
   1967   // Compare characters.
   1968   __ bind(&compare_chars);
   1969 
   1970   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
   1971                                   v0, &strings_not_equal);
   1972 
   1973   // Characters are equal.
   1974   __ Ret(USE_DELAY_SLOT);
   1975   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   1976 }
   1977 
   1978 
   1979 void StringHelper::GenerateCompareFlatOneByteStrings(
   1980     MacroAssembler* masm, Register left, Register right, Register scratch1,
   1981     Register scratch2, Register scratch3, Register scratch4) {
   1982   Label result_not_equal, compare_lengths;
   1983   // Find minimum length and length difference.
   1984   __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
   1985   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
   1986   __ Subu(scratch3, scratch1, Operand(scratch2));
   1987   Register length_delta = scratch3;
   1988   __ slt(scratch4, scratch2, scratch1);
   1989   __ Movn(scratch1, scratch2, scratch4);
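          // The slt/Movn pair computes min: scratch1 now holds the smaller length.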
   1990   Register min_length = scratch1;
   1991   STATIC_ASSERT(kSmiTag == 0);
   1992   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
   1993 
   1994   // Compare loop.
   1995   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
   1996                                   scratch4, v0, &result_not_equal);
   1997 
   1998   // Compare lengths - strings up to min-length are equal.
   1999   __ bind(&compare_lengths);
   2000   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   2001   // Use length_delta as result if it's zero.
   2002   __ mov(scratch2, length_delta);
   2003   __ mov(scratch4, zero_reg);
   2004   __ mov(v0, zero_reg);
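          // Fall through to result_not_equal with scratch2 = length_delta and
          // scratch4 = 0, so the sign of the length difference determines the
          // result; v0 already holds EQUAL for the zero-delta case.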
   2005 
   2006   __ bind(&result_not_equal);
   2007   // Conditionally update the result based either on length_delta or
   2008   // the last comparison performed in the loop above.
   2009   Label ret;
   2010   __ Branch(&ret, eq, scratch2, Operand(scratch4));
   2011   __ li(v0, Operand(Smi::FromInt(GREATER)));
   2012   __ Branch(&ret, gt, scratch2, Operand(scratch4));
   2013   __ li(v0, Operand(Smi::FromInt(LESS)));
   2014   __ bind(&ret);
   2015   __ Ret();
   2016 }
   2017 
   2018 
   2019 void StringHelper::GenerateOneByteCharsCompareLoop(
   2020     MacroAssembler* masm, Register left, Register right, Register length,
   2021     Register scratch1, Register scratch2, Register scratch3,
   2022     Label* chars_not_equal) {
   2023   // Change index to run from -length to -1 by adding length to string
   2024   // start. This means that loop ends when index reaches zero, which
   2025   // doesn't need an additional compare.
   2026   __ SmiUntag(length);
   2027   __ Addu(scratch1, length,
   2028           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   2029   __ Addu(left, left, Operand(scratch1));
   2030   __ Addu(right, right, Operand(scratch1));
   2031   __ Subu(length, zero_reg, length);
   2032   Register index = length;  // index = -length;
   2033
   2035   // Compare loop.
   2036   Label loop;
   2037   __ bind(&loop);
   2038   __ Addu(scratch3, left, index);
   2039   __ lbu(scratch1, MemOperand(scratch3));
   2040   __ Addu(scratch3, right, index);
   2041   __ lbu(scratch2, MemOperand(scratch3));
   2042   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
   2043   __ Addu(index, index, 1);
   2044   __ Branch(&loop, ne, index, Operand(zero_reg));
   2045 }
   2046 
   2047 
   2048 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   2049   // ----------- S t a t e -------------
   2050   //  -- a1    : left
   2051   //  -- a0    : right
   2052   //  -- ra    : return address
   2053   // -----------------------------------
   2054 
   2055   // Load a2 with the allocation site. We stick an undefined dummy value here
   2056   // and replace it with the real allocation site later when we instantiate this
   2057   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   2058   __ li(a2, isolate()->factory()->undefined_value());
   2059 
   2060   // Make sure that we actually patched the allocation site.
   2061   if (FLAG_debug_code) {
   2062     __ And(at, a2, Operand(kSmiTagMask));
   2063     __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
   2064     __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
   2065     __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   2066     __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
   2067   }
   2068 
   2069   // Tail call into the stub that handles binary operations with allocation
   2070   // sites.
   2071   BinaryOpWithAllocationSiteStub stub(isolate(), state());
   2072   __ TailCallStub(&stub);
   2073 }
   2074 
   2075 
   2076 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   2077   DCHECK_EQ(CompareICState::BOOLEAN, state());
   2078   Label miss;
   2079 
   2080   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2081   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2082   if (!Token::IsEqualityOp(op())) {
   2083     __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
   2084     __ AssertSmi(a1);
   2085     __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
   2086     __ AssertSmi(a0);
   2087   }
   2088   __ Ret(USE_DELAY_SLOT);
   2089   __ Subu(v0, a1, a0);
   2090 
   2091   __ bind(&miss);
   2092   GenerateMiss(masm);
   2093 }
   2094 
   2095 
   2096 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   2097   DCHECK(state() == CompareICState::SMI);
   2098   Label miss;
   2099   __ Or(a2, a1, a0);
   2100   __ JumpIfNotSmi(a2, &miss);
   2101 
   2102   if (GetCondition() == eq) {
   2103     // For equality we do not care about the sign of the result.
   2104     __ Ret(USE_DELAY_SLOT);
   2105     __ Subu(v0, a0, a1);
   2106   } else {
   2107     // Untag before subtracting to avoid handling overflow.
   2108     __ SmiUntag(a1);
   2109     __ SmiUntag(a0);
   2110     __ Ret(USE_DELAY_SLOT);
   2111     __ Subu(v0, a1, a0);
   2112   }
   2113 
   2114   __ bind(&miss);
   2115   GenerateMiss(masm);
   2116 }
   2117 
   2118 
   2119 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   2120   DCHECK(state() == CompareICState::NUMBER);
   2121 
   2122   Label generic_stub;
   2123   Label unordered, maybe_undefined1, maybe_undefined2;
   2124   Label miss;
   2125 
   2126   if (left() == CompareICState::SMI) {
   2127     __ JumpIfNotSmi(a1, &miss);
   2128   }
   2129   if (right() == CompareICState::SMI) {
   2130     __ JumpIfNotSmi(a0, &miss);
   2131   }
   2132 
   2133   // Inlining the double comparison and falling back to the general compare
   2134   // stub if NaN is involved.
   2135   // Load left and right operand.
   2136   Label done, left, left_smi, right_smi;
   2137   __ JumpIfSmi(a0, &right_smi);
   2138   __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
   2139               DONT_DO_SMI_CHECK);
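          // Untag the heap number pointer so ldc1 can address the double value with
          // a plain (non-field) offset.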
   2140   __ Subu(a2, a0, Operand(kHeapObjectTag));
   2141   __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
   2142   __ Branch(&left);
   2143   __ bind(&right_smi);
   2144   __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
   2145   FPURegister single_scratch = f6;
   2146   __ mtc1(a2, single_scratch);
   2147   __ cvt_d_w(f2, single_scratch);
   2148 
   2149   __ bind(&left);
   2150   __ JumpIfSmi(a1, &left_smi);
   2151   __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
   2152               DONT_DO_SMI_CHECK);
   2153   __ Subu(a2, a1, Operand(kHeapObjectTag));
   2154   __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
   2155   __ Branch(&done);
   2156   __ bind(&left_smi);
   2157   __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
   2158   single_scratch = f8;
   2159   __ mtc1(a2, single_scratch);
   2160   __ cvt_d_w(f0, single_scratch);
   2161 
   2162   __ bind(&done);
   2163 
   2164   // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
   2165   Label fpu_eq, fpu_lt;
   2166   // Test if equal, and also handle the unordered/NaN case.
   2167   __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
   2168 
   2169   // Test if less (unordered case is already handled).
   2170   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
   2171 
   2172   // Otherwise it's greater, so just fall thru, and return.
   2173   DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
   2174   __ Ret(USE_DELAY_SLOT);
   2175   __ li(v0, Operand(GREATER));
   2176 
   2177   __ bind(&fpu_eq);
   2178   __ Ret(USE_DELAY_SLOT);
   2179   __ li(v0, Operand(EQUAL));
   2180 
   2181   __ bind(&fpu_lt);
   2182   __ Ret(USE_DELAY_SLOT);
   2183   __ li(v0, Operand(LESS));
   2184 
   2185   __ bind(&unordered);
   2186   __ bind(&generic_stub);
   2187   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
   2188                      CompareICState::GENERIC, CompareICState::GENERIC);
   2189   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   2190 
   2191   __ bind(&maybe_undefined1);
   2192   if (Token::IsOrderedRelationalCompareOp(op())) {
   2193     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   2194     __ Branch(&miss, ne, a0, Operand(at));
   2195     __ JumpIfSmi(a1, &unordered);
   2196     __ GetObjectType(a1, a2, a2);
   2197     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
   2198     __ jmp(&unordered);
   2199   }
   2200 
   2201   __ bind(&maybe_undefined2);
   2202   if (Token::IsOrderedRelationalCompareOp(op())) {
   2203     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   2204     __ Branch(&unordered, eq, a1, Operand(at));
   2205   }
   2206 
   2207   __ bind(&miss);
   2208   GenerateMiss(masm);
   2209 }
   2210 
   2211 
   2212 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   2213   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   2214   Label miss;
   2215 
   2216   // Registers containing left and right operands respectively.
   2217   Register left = a1;
   2218   Register right = a0;
   2219   Register tmp1 = a2;
   2220   Register tmp2 = a3;
   2221 
   2222   // Check that both operands are heap objects.
   2223   __ JumpIfEitherSmi(left, right, &miss);
   2224 
   2225   // Check that both operands are internalized strings.
   2226   __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2227   __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2228   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2229   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2230   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   2231   __ Or(tmp1, tmp1, Operand(tmp2));
   2232   __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   2233   __ Branch(&miss, ne, at, Operand(zero_reg));
   2234 
   2235   // Make sure a0 is non-zero. At this point input operands are
   2236   // guaranteed to be non-zero.
   2237   DCHECK(right.is(a0));
   2238   STATIC_ASSERT(EQUAL == 0);
   2239   STATIC_ASSERT(kSmiTag == 0);
   2240   __ mov(v0, right);
   2241   // Internalized strings are compared by identity.
   2242   __ Ret(ne, left, Operand(right));
   2243   DCHECK(is_int16(EQUAL));
   2244   __ Ret(USE_DELAY_SLOT);
   2245   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   2246 
   2247   __ bind(&miss);
   2248   GenerateMiss(masm);
   2249 }
   2250 
   2251 
   2252 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   2253   DCHECK(state() == CompareICState::UNIQUE_NAME);
   2254   DCHECK(GetCondition() == eq);
   2255   Label miss;
   2256 
   2257   // Registers containing left and right operands respectively.
   2258   Register left = a1;
   2259   Register right = a0;
   2260   Register tmp1 = a2;
   2261   Register tmp2 = a3;
   2262 
   2263   // Check that both operands are heap objects.
   2264   __ JumpIfEitherSmi(left, right, &miss);
   2265 
   2266   // Check that both operands are unique names. This leaves the instance
   2267   // types loaded in tmp1 and tmp2.
   2268   __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2269   __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2270   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2271   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2272 
   2273   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
   2274   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
   2275 
   2276   // Use a0 as result
   2277   __ mov(v0, a0);
   2278 
   2279   // Unique names are compared by identity.
   2280   Label done;
   2281   __ Branch(&done, ne, left, Operand(right));
   2282   // Make sure a0 is non-zero. At this point input operands are
   2283   // guaranteed to be non-zero.
   2284   DCHECK(right.is(a0));
   2285   STATIC_ASSERT(EQUAL == 0);
   2286   STATIC_ASSERT(kSmiTag == 0);
   2287   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   2288   __ bind(&done);
   2289   __ Ret();
   2290 
   2291   __ bind(&miss);
   2292   GenerateMiss(masm);
   2293 }
   2294 
   2295 
   2296 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
   2297   DCHECK(state() == CompareICState::STRING);
   2298   Label miss;
   2299 
   2300   bool equality = Token::IsEqualityOp(op());
   2301 
   2302   // Registers containing left and right operands respectively.
   2303   Register left = a1;
   2304   Register right = a0;
   2305   Register tmp1 = a2;
   2306   Register tmp2 = a3;
   2307   Register tmp3 = t0;
   2308   Register tmp4 = t1;
   2309   Register tmp5 = t2;
   2310 
   2311   // Check that both operands are heap objects.
   2312   __ JumpIfEitherSmi(left, right, &miss);
   2313 
   2314   // Check that both operands are strings. This leaves the instance
   2315   // types loaded in tmp1 and tmp2.
   2316   __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2317   __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2318   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2319   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2320   STATIC_ASSERT(kNotStringTag != 0);
   2321   __ Or(tmp3, tmp1, tmp2);
   2322   __ And(tmp5, tmp3, Operand(kIsNotStringMask));
   2323   __ Branch(&miss, ne, tmp5, Operand(zero_reg));
   2324 
   2325   // Fast check for identical strings.
   2326   Label left_ne_right;
   2327   STATIC_ASSERT(EQUAL == 0);
   2328   STATIC_ASSERT(kSmiTag == 0);
   2329   __ Branch(&left_ne_right, ne, left, Operand(right));
   2330   __ Ret(USE_DELAY_SLOT);
   2331   __ mov(v0, zero_reg);  // In the delay slot.
   2332   __ bind(&left_ne_right);
   2333 
   2334   // Handle not identical strings.
   2335 
   2336   // Check that both strings are internalized strings. If they are, we're done
   2337   // because we already know they are not identical. We know they are both
   2338   // strings.
   2339   if (equality) {
   2340     DCHECK(GetCondition() == eq);
   2341     STATIC_ASSERT(kInternalizedTag == 0);
   2342     __ Or(tmp3, tmp1, Operand(tmp2));
   2343     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
   2344     Label is_symbol;
   2345     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
   2346     // Make sure a0 is non-zero. At this point input operands are
   2347     // guaranteed to be non-zero.
   2348     DCHECK(right.is(a0));
   2349     __ Ret(USE_DELAY_SLOT);
   2350     __ mov(v0, a0);  // In the delay slot.
   2351     __ bind(&is_symbol);
   2352   }
   2353 
   2354   // Check that both strings are sequential one-byte.
   2355   Label runtime;
   2356   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
   2357                                                     &runtime);
   2358 
   2359   // Compare flat one-byte strings. Returns when done.
   2360   if (equality) {
   2361     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
   2362                                                   tmp3);
   2363   } else {
   2364     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
   2365                                                     tmp2, tmp3, tmp4);
   2366   }
   2367 
   2368   // Handle more complex cases in runtime.
   2369   __ bind(&runtime);
   2370   if (equality) {
   2371     {
   2372       FrameScope scope(masm, StackFrame::INTERNAL);
   2373       __ Push(left, right);
   2374       __ CallRuntime(Runtime::kStringEqual);
   2375     }
   2376     __ LoadRoot(a0, Heap::kTrueValueRootIndex);
   2377     __ Ret(USE_DELAY_SLOT);
   2378     __ Subu(v0, v0, a0);  // In delay slot.
   2379   } else {
   2380     __ Push(left, right);
   2381     __ TailCallRuntime(Runtime::kStringCompare);
   2382   }
   2383 
   2384   __ bind(&miss);
   2385   GenerateMiss(masm);
   2386 }
   2387 
   2388 
   2389 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
   2390   DCHECK_EQ(CompareICState::RECEIVER, state());
   2391   Label miss;
   2392   __ And(a2, a1, Operand(a0));
   2393   __ JumpIfSmi(a2, &miss);
   2394 
   2395   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   2396   __ GetObjectType(a0, a2, a2);
   2397   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
   2398   __ GetObjectType(a1, a2, a2);
   2399   __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
   2400 
   2401   DCHECK_EQ(eq, GetCondition());
   2402   __ Ret(USE_DELAY_SLOT);
   2403   __ subu(v0, a0, a1);
   2404 
   2405   __ bind(&miss);
   2406   GenerateMiss(masm);
   2407 }
   2408 
   2409 
   2410 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   2411   Label miss;
   2412   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   2413   __ And(a2, a1, a0);
   2414   __ JumpIfSmi(a2, &miss);
   2415   __ GetWeakValue(t0, cell);
   2416   __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
   2417   __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
   2418   __ Branch(&miss, ne, a2, Operand(t0));
   2419   __ Branch(&miss, ne, a3, Operand(t0));
   2420 
   2421   if (Token::IsEqualityOp(op())) {
   2422     __ Ret(USE_DELAY_SLOT);
   2423     __ subu(v0, a0, a1);
   2424   } else {
   2425     if (op() == Token::LT || op() == Token::LTE) {
   2426       __ li(a2, Operand(Smi::FromInt(GREATER)));
   2427     } else {
   2428       __ li(a2, Operand(Smi::FromInt(LESS)));
   2429     }
   2430     __ Push(a1, a0, a2);
   2431     __ TailCallRuntime(Runtime::kCompare);
   2432   }
   2433 
   2434   __ bind(&miss);
   2435   GenerateMiss(masm);
   2436 }
   2437 
   2438 
   2439 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   2440   {
   2441     // Call the runtime system in a fresh internal frame.
   2442     FrameScope scope(masm, StackFrame::INTERNAL);
   2443     __ Push(a1, a0);
   2444     __ Push(ra, a1, a0);
   2445     __ li(t0, Operand(Smi::FromInt(op())));
   2446     __ addiu(sp, sp, -kPointerSize);
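            // The slot reserved above receives the op() smi via the store executed
            // in the branch delay slot of the runtime call below.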
   2447     __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
   2448                    USE_DELAY_SLOT);
   2449     __ sw(t0, MemOperand(sp));  // In the delay slot.
   2450     // Compute the entry point of the rewritten stub.
   2451     __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   2452     // Restore registers.
   2453     __ Pop(a1, a0, ra);
   2454   }
   2455   __ Jump(a2);
   2456 }
   2457 
   2458 
   2459 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   2460   // Make room for arguments to fit the C calling convention. Most callers
   2461   // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
   2462   // so they handle restoring the stack and we don't have to do that here.
   2463   // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
   2464   // kCArgsSlotsSize stack space after the call.
   2465   __ Subu(sp, sp, Operand(kCArgsSlotsSize));
   2466   // Place the return address on the stack, making the call
   2467   // GC safe. The RegExp backend also relies on this.
   2468   __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
   2469   __ Call(t9);  // Call the C++ function.
   2470   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
   2471 
   2472   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
   2473     // In case of an error the return address may point to a memory area
   2474     // filled with kZapValue by the GC.
   2475     // Dereference the address and check for this.
   2476     __ lw(t0, MemOperand(t9));
   2477     __ Assert(ne, kReceivedInvalidReturnAddress, t0,
   2478         Operand(reinterpret_cast<uint32_t>(kZapValue)));
   2479   }
   2480   __ Jump(t9);
   2481 }
   2482 
   2483 
   2484 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
   2485                                     Register target) {
   2486   intptr_t loc =
   2487       reinterpret_cast<intptr_t>(GetCode().location());
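          // DirectCEntryStub::Generate expects the target C function address in t9.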
   2488   __ Move(t9, target);
   2489   __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
   2490   __ Call(at);
   2491 }
   2492 
   2493 
   2494 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
   2495                                                       Label* miss,
   2496                                                       Label* done,
   2497                                                       Register receiver,
   2498                                                       Register properties,
   2499                                                       Handle<Name> name,
   2500                                                       Register scratch0) {
   2501   DCHECK(name->IsUniqueName());
   2502   // If the names at the probed slots (probes 1 to kProbes - 1 for the hash
   2503   // value) are not equal to the name, and the kProbes-th slot is unused (its
   2504   // name is the undefined value), the hash table is guaranteed not to contain
   2505   // the property. This holds even if some slots represent deleted properties
   2506   // (their names are the hole value).
   2507   for (int i = 0; i < kInlinedProbes; i++) {
   2508     // scratch0 points to properties hash.
   2509     // Compute the masked index: (hash + i + i * i) & mask.
   2510     Register index = scratch0;
   2511     // Capacity is smi 2^n.
   2512     __ lw(index, FieldMemOperand(properties, kCapacityOffset));
   2513     __ Subu(index, index, Operand(1));
   2514     __ And(index, index, Operand(
   2515         Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
   2516 
   2517     // Scale the index by multiplying by the entry size.
   2518     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
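            // index *= 3 (each entry spans three elements).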
   2519     __ Lsa(index, index, index, 1);
   2520 
   2521     Register entity_name = scratch0;
   2522     // Having undefined at this place means the name is not contained.
   2523     STATIC_ASSERT(kSmiTagSize == 1);
   2524     Register tmp = properties;
   2525     __ Lsa(tmp, properties, index, 1);
   2526     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   2527 
   2528     DCHECK(!tmp.is(entity_name));
   2529     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
   2530     __ Branch(done, eq, entity_name, Operand(tmp));
   2531 
   2532     // Load the hole ready for use below:
   2533     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
   2534 
   2535     // Stop if found the property.
   2536     __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
   2537 
   2538     Label good;
   2539     __ Branch(&good, eq, entity_name, Operand(tmp));
   2540 
   2541     // Check if the entry name is not a unique name.
   2542     __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   2543     __ lbu(entity_name,
   2544            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   2545     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
   2546     __ bind(&good);
   2547 
   2548     // Restore the properties.
   2549     __ lw(properties,
   2550           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   2551   }
   2552 
   2553   const int spill_mask =
   2554       (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
   2555        a2.bit() | a1.bit() | a0.bit() | v0.bit());
   2556 
   2557   __ MultiPush(spill_mask);
   2558   __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   2559   __ li(a1, Operand(Handle<Name>(name)));
   2560   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   2561   __ CallStub(&stub);
   2562   __ mov(at, v0);
   2563   __ MultiPop(spill_mask);
   2564 
   2565   __ Branch(done, eq, at, Operand(zero_reg));
   2566   __ Branch(miss, ne, at, Operand(zero_reg));
   2567 }
   2568 
   2569 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   2570   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   2571   // we cannot call anything that could cause a GC from this stub.
   2572   // Registers:
   2573   //  result (v0): zero if the lookup failed, non-zero otherwise.
   2574   //  dictionary (a0): NameDictionary to probe.
   2575   //  key (a1): the name to look up.
   2576   //  index (a2): will hold the index of the entry if the lookup is
   2577   //              successful.
   2578   // Returns:
   2579   //  result is zero if the lookup failed, non-zero otherwise.
   2580 
   2581   Register result = v0;
   2582   Register dictionary = a0;
   2583   Register key = a1;
   2584   Register index = a2;
   2585   Register mask = a3;
   2586   Register hash = t0;
   2587   Register undefined = t1;
   2588   Register entry_key = t2;
   2589 
   2590   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   2591 
   2592   __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
   2593   __ sra(mask, mask, kSmiTagSize);
   2594   __ Subu(mask, mask, Operand(1));
   2595 
   2596   __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
   2597 
   2598   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   2599 
   2600   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   2601     // Compute the masked index: (hash + i + i * i) & mask.
   2602     // Capacity is a smi and a power of two (2^n).
   2603     if (i > 0) {
   2604       // Add the probe offset (i + i * i) left shifted by kHashShift, so
   2605       // that the single srl below both extracts the hash and folds in the
   2606       // probe offset. See the identity sketched below.
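              // This relies on the identity (exact as long as the addition does
              // not overflow, which the DCHECK below guards):
              //   (hash + (off << kHashShift)) >> kHashShift
              //       == (hash >> kHashShift) + off,   where off = i + i * i.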
   2607       DCHECK(NameDictionary::GetProbeOffset(i) <
   2608              1 << (32 - Name::kHashFieldOffset));
   2609       __ Addu(index, hash, Operand(
   2610           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   2611     } else {
   2612       __ mov(index, hash);
   2613     }
   2614     __ srl(index, index, Name::kHashShift);
   2615     __ And(index, mask, index);
   2616 
   2617     // Scale the index by multiplying by the entry size.
   2618     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   2619     // index *= 3.
   2620     __ Lsa(index, index, index, 1);
   2621 
   2622     STATIC_ASSERT(kSmiTagSize == 1);
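            // index = dictionary + index * kPointerSize: index is untagged
            // here, so Lsa scales it by 4 (= kPointerSize) directly.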
   2623     __ Lsa(index, dictionary, index, 2);
   2624     __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
   2625 
   2626     // Having undefined in this slot means the name is not contained.
   2627     __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
   2628 
   2629     // Stop if we found the property.
   2630     __ Branch(&in_dictionary, eq, entry_key, Operand(key));
   2631 
   2632     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
   2633       // Check if the entry name is not a unique name.
   2634       __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   2635       __ lbu(entry_key,
   2636              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   2637       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
   2638     }
   2639   }
   2640 
   2641   __ bind(&maybe_in_dictionary);
   2642   // If we are doing a negative lookup, then probing failure should be
   2643   // treated as a lookup success. For a positive lookup, probing failure
   2644   // should be treated as a lookup failure.
   2645   if (mode() == POSITIVE_LOOKUP) {
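            // The mov below executes in the branch delay slot of Ret, so the
            // result register is still set before the return takes effect.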
   2646     __ Ret(USE_DELAY_SLOT);
   2647     __ mov(result, zero_reg);
   2648   }
   2649 
   2650   __ bind(&in_dictionary);
   2651   __ Ret(USE_DELAY_SLOT);
   2652   __ li(result, 1);
   2653 
   2654   __ bind(&not_in_dictionary);
   2655   __ Ret(USE_DELAY_SLOT);
   2656   __ mov(result, zero_reg);
   2657 }
   2658 
   2659 
   2660 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
   2661     Isolate* isolate) {
   2662   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
   2663   stub1.GetCode();
   2664   // Hydrogen code stubs need stub2 at snapshot time.
   2665   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
   2666   stub2.GetCode();
   2667 }
   2668 
   2669 
   2670 // Takes its input in three registers: address_, value_, and object_.  A
   2671 // pointer to the value has just been written into the object, and this stub
   2672 // makes sure the GC is kept informed.  The word in the object where the
   2673 // value was written is in the address register.
   2674 void RecordWriteStub::Generate(MacroAssembler* masm) {
   2675   Label skip_to_incremental_noncompacting;
   2676   Label skip_to_incremental_compacting;
   2677 
   2678   // The first two branch+nop instructions are generated with labels so as to
   2679   // get the offset fixed up correctly by the bind(Label*) call.  Each branch
   2680   // is patched back and forth between a "bne zero_reg, zero_reg, ..." (never
   2681   // taken, hence effectively a nop in this position) and a
   2682   // "beq zero_reg, zero_reg, ..." (always taken) when incremental heap
   2683   // marking starts and stops.  See RecordWriteStub::Patch for details.
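          // Illustrative layout of the patched instructions (offsets in units of
          // Assembler::kInstrSize, matching the PatchBranchIntoNop calls at the
          // end of this function):
          //   +0: beq/bne zero_reg, zero_reg, skip_to_incremental_noncompacting
          //   +1: nop                                    (branch delay slot)
          //   +2: beq/bne zero_reg, zero_reg, skip_to_incremental_compacting
          //   +3: nop                                    (branch delay slot)
          // With both branches as bne (never taken) the stub is in
          // STORE_BUFFER_ONLY mode; patching the first or the second back to beq
          // selects INCREMENTAL or INCREMENTAL_COMPACTION respectively.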
   2684   __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
   2685   __ nop();
   2686   __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
   2687   __ nop();
   2688 
   2689   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   2690     __ RememberedSetHelper(object(),
   2691                            address(),
   2692                            value(),
   2693                            save_fp_regs_mode(),
   2694                            MacroAssembler::kReturnAtEnd);
   2695   }
   2696   __ Ret();
   2697 
   2698   __ bind(&skip_to_incremental_noncompacting);
   2699   GenerateIncremental(masm, INCREMENTAL);
   2700 
   2701   __ bind(&skip_to_incremental_compacting);
   2702   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
   2703 
   2704   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
   2705   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
   2706 
   2707   PatchBranchIntoNop(masm, 0);
   2708   PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
   2709 }
   2710 
   2711 
   2712 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   2713   regs_.Save(masm);
   2714 
   2715   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   2716     Label dont_need_remembered_set;
   2717 
   2718     __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
   2719     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
   2720                            regs_.scratch0(),
   2721                            &dont_need_remembered_set);
   2722 
   2723     __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
   2724                         &dont_need_remembered_set);
   2725 
   2726     // First notify the incremental marker if necessary, then update the
   2727     // remembered set.
   2728     CheckNeedsToInformIncrementalMarker(
   2729         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
   2730     InformIncrementalMarker(masm);
   2731     regs_.Restore(masm);
   2732     __ RememberedSetHelper(object(),
   2733                            address(),
   2734                            value(),
   2735                            save_fp_regs_mode(),
   2736                            MacroAssembler::kReturnAtEnd);
   2737 
   2738     __ bind(&dont_need_remembered_set);
   2739   }
   2740 
   2741   CheckNeedsToInformIncrementalMarker(
   2742       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
   2743   InformIncrementalMarker(masm);
   2744   regs_.Restore(masm);
   2745   __ Ret();
   2746 }
   2747 
   2748 
   2749 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
   2750   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   2751   int argument_count = 3;
   2752   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   2753   Register address =
   2754       a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
   2755   DCHECK(!address.is(regs_.object()));
   2756   DCHECK(!address.is(a0));
   2757   __ Move(address, regs_.address());
   2758   __ Move(a0, regs_.object());
   2759   __ Move(a1, address);
   2760   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
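          // At this point a0 = object, a1 = slot address and a2 = isolate: the
          // three arguments (argument_count above) of the record-write C function.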
   2761 
   2762   AllowExternalCallThatCantCauseGC scope(masm);
   2763   __ CallCFunction(
   2764       ExternalReference::incremental_marking_record_write_function(isolate()),
   2765       argument_count);
   2766   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
   2767 }
   2768 
   2769 
   2770 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   2771     MacroAssembler* masm,
   2772     OnNoNeedToInformIncrementalMarker on_no_need,
   2773     Mode mode) {
   2774   Label on_black;
   2775   Label need_incremental;
   2776   Label need_incremental_pop_scratch;
   2777 
   2778   // Let's look at the color of the object:  If it is not black we don't have
   2779   // to inform the incremental marker.
   2780   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
   2781 
   2782   regs_.Restore(masm);
   2783   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   2784     __ RememberedSetHelper(object(),
   2785                            address(),
   2786                            value(),
   2787                            save_fp_regs_mode(),
   2788                            MacroAssembler::kReturnAtEnd);
   2789   } else {
   2790     __ Ret();
   2791   }
   2792 
   2793   __ bind(&on_black);
   2794 
   2795   // Get the value from the slot.
   2796   __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
   2797 
   2798   if (mode == INCREMENTAL_COMPACTION) {
   2799     Label ensure_not_white;
   2800 
   2801     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
   2802                      regs_.scratch1(),  // Scratch.
   2803                      MemoryChunk::kEvacuationCandidateMask,
   2804                      eq,
   2805                      &ensure_not_white);
   2806 
   2807     __ CheckPageFlag(regs_.object(),
   2808                      regs_.scratch1(),  // Scratch.
   2809                      MemoryChunk::kSkipEvacuationSlotsRecordingMask,
   2810                      eq,
   2811                      &need_incremental);
   2812 
   2813     __ bind(&ensure_not_white);
   2814   }
   2815 
   2816   // We need extra registers for this, so we push the object and the address
   2817   // register temporarily.
   2818   __ Push(regs_.object(), regs_.address());
   2819   __ JumpIfWhite(regs_.scratch0(),  // The value.
   2820                  regs_.scratch1(),  // Scratch.
   2821                  regs_.object(),    // Scratch.
   2822                  regs_.address(),   // Scratch.
   2823                  &need_incremental_pop_scratch);
   2824   __ Pop(regs_.object(), regs_.address());
   2825 
   2826   regs_.Restore(masm);
   2827   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   2828     __ RememberedSetHelper(object(),
   2829                            address(),
   2830                            value(),
   2831                            save_fp_regs_mode(),
   2832                            MacroAssembler::kReturnAtEnd);
   2833   } else {
   2834     __ Ret();
   2835   }
   2836 
   2837   __ bind(&need_incremental_pop_scratch);
   2838   __ Pop(regs_.object(), regs_.address());
   2839 
   2840   __ bind(&need_incremental);
   2841 
   2842   // Fall through when we need to inform the incremental marker.
   2843 }
   2844 
   2845 
   2846 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   2847   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   2848   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   2849   int parameter_count_offset =
   2850       StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   2851   __ lw(a1, MemOperand(fp, parameter_count_offset));
   2852   if (function_mode() == JS_FUNCTION_STUB_MODE) {
   2853     __ Addu(a1, a1, Operand(1));
   2854   }
   2855   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   2856   __ sll(a1, a1, kPointerSizeLog2);
   2857   __ Ret(USE_DELAY_SLOT);
   2858   __ Addu(sp, sp, a1);
   2859 }
   2860 
   2861 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   2862   if (masm->isolate()->function_entry_hook() != NULL) {
   2863     ProfileEntryHookStub stub(masm->isolate());
   2864     __ push(ra);
   2865     __ CallStub(&stub);
   2866     __ pop(ra);
   2867   }
   2868 }
   2869 
   2870 
   2871 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   2872   // The entry hook is a "push ra" instruction, followed by a call.
   2873   // Note: on MIPS a "push" expands to two instructions.
   2874   const int32_t kReturnAddressDistanceFromFunctionStart =
   2875       Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
   2876 
   2877   // This should contain all kJSCallerSaved registers.
   2878   const RegList kSavedRegs =
   2879      kJSCallerSaved |  // Caller saved registers.
   2880      s5.bit();         // Saved stack pointer.
   2881 
   2882   // We also save ra, so the count here is one higher than the mask indicates.
   2883   const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
   2884 
   2885   // Save all caller-save registers as this may be called from anywhere.
   2886   __ MultiPush(kSavedRegs | ra.bit());
   2887 
   2888   // Compute the function's address for the first argument.
   2889   __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
   2890 
   2891   // The caller's return address is above the saved temporaries.
   2892   // Grab that for the second argument to the hook.
   2893   __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
   2894 
   2895   // Align the stack if necessary.
   2896   int frame_alignment = masm->ActivationFrameAlignment();
   2897   if (frame_alignment > kPointerSize) {
   2898     __ mov(s5, sp);
   2899     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   2900     __ And(sp, sp, Operand(-frame_alignment));
   2901   }
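          // The MIPS O32 ABI requires the caller to reserve stack space for the
          // callee's register arguments; kCArgsSlotsSize accounts for that.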
   2902   __ Subu(sp, sp, kCArgsSlotsSize);
   2903 #if defined(V8_HOST_ARCH_MIPS)
   2904   int32_t entry_hook =
   2905       reinterpret_cast<int32_t>(isolate()->function_entry_hook());
   2906   __ li(t9, Operand(entry_hook));
   2907 #else
   2908   // Under the simulator we need to indirect the entry hook through a
   2909   // trampoline function at a known address.
   2910   // It additionally takes an isolate as a third parameter.
   2911   __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
   2912 
   2913   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   2914   __ li(t9, Operand(ExternalReference(&dispatcher,
   2915                                       ExternalReference::BUILTIN_CALL,
   2916                                       isolate())));
   2917 #endif
   2918   // Call the C function through t9 to conform to the ABI for PIC code.
   2919   __ Call(t9);
   2920 
   2921   // Restore the stack pointer if needed.
   2922   if (frame_alignment > kPointerSize) {
   2923     __ mov(sp, s5);
   2924   } else {
   2925     __ Addu(sp, sp, kCArgsSlotsSize);
   2926   }
   2927 
   2928   // Also pop ra to get Ret(0).
   2929   __ MultiPop(kSavedRegs | ra.bit());
   2930   __ Ret();
   2931 }
   2932 
   2933 
   2934 template<class T>
   2935 static void CreateArrayDispatch(MacroAssembler* masm,
   2936                                 AllocationSiteOverrideMode mode) {
   2937   if (mode == DISABLE_ALLOCATION_SITES) {
   2938     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
   2939     __ TailCallStub(&stub);
   2940   } else if (mode == DONT_OVERRIDE) {
   2941     int last_index = GetSequenceIndexFromFastElementsKind(
   2942         TERMINAL_FAST_ELEMENTS_KIND);
   2943     for (int i = 0; i <= last_index; ++i) {
   2944       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   2945       T stub(masm->isolate(), kind);
   2946       __ TailCallStub(&stub, eq, a3, Operand(kind));
   2947     }
   2948 
   2949     // If we reached this point there is a problem.
   2950     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   2951   } else {
   2952     UNREACHABLE();
   2953   }
   2954 }
   2955 
   2956 
   2957 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   2958                                            AllocationSiteOverrideMode mode) {
   2959   // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
   2960   // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
   2961   // a0 - number of arguments
   2962   // a1 - constructor?
   2963   // sp[0] - last argument
   2964   Label normal_sequence;
   2965   if (mode == DONT_OVERRIDE) {
   2966     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   2967     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   2968     STATIC_ASSERT(FAST_ELEMENTS == 2);
   2969     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   2970     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
   2971     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
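            // Given the encoding asserted above, each packed kind and its holey
            // counterpart differ only in the low bit (holey == packed | 1), so a
            // packed->holey transition is simply "kind + 1" (see below).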
   2972 
   2973     // Is the low bit set? If so, we are holey and that is good.
   2974     __ And(at, a3, Operand(1));
   2975     __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
   2976   }
   2977 
   2978   // Look at the first argument.
   2979   __ lw(t1, MemOperand(sp, 0));
   2980   __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
   2981 
   2982   if (mode == DISABLE_ALLOCATION_SITES) {
   2983     ElementsKind initial = GetInitialFastElementsKind();
   2984     ElementsKind holey_initial = GetHoleyElementsKind(initial);
   2985 
   2986     ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
   2987                                                   holey_initial,
   2988                                                   DISABLE_ALLOCATION_SITES);
   2989     __ TailCallStub(&stub_holey);
   2990 
   2991     __ bind(&normal_sequence);
   2992     ArraySingleArgumentConstructorStub stub(masm->isolate(),
   2993                                             initial,
   2994                                             DISABLE_ALLOCATION_SITES);
   2995     __ TailCallStub(&stub);
   2996   } else if (mode == DONT_OVERRIDE) {
   2997     // We are going to create a holey array, but our kind is non-holey.
   2998     // Fix kind and retry (only if we have an allocation site in the slot).
   2999     __ Addu(a3, a3, Operand(1));
   3000 
   3001     if (FLAG_debug_code) {
   3002       __ lw(t1, FieldMemOperand(a2, 0));
   3003       __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   3004       __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
   3005     }
   3006 
   3007     // Save the resulting elements kind in the type info. We can't just store
   3008     // a3 in the AllocationSite::transition_info field, because the elements
   3009     // kind occupies only a portion of the field; upper bits must be left alone.
   3010     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   3011     __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
   3012     __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
   3013     __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
   3014 
   3015 
   3016     __ bind(&normal_sequence);
   3017     int last_index = GetSequenceIndexFromFastElementsKind(
   3018         TERMINAL_FAST_ELEMENTS_KIND);
   3019     for (int i = 0; i <= last_index; ++i) {
   3020       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   3021       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
   3022       __ TailCallStub(&stub, eq, a3, Operand(kind));
   3023     }
   3024 
   3025     // If we reached this point there is a problem.
   3026     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   3027   } else {
   3028     UNREACHABLE();
   3029   }
   3030 }
   3031 
   3032 
   3033 template<class T>
   3034 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
   3035   int to_index = GetSequenceIndexFromFastElementsKind(
   3036       TERMINAL_FAST_ELEMENTS_KIND);
   3037   for (int i = 0; i <= to_index; ++i) {
   3038     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   3039     T stub(isolate, kind);
   3040     stub.GetCode();
   3041     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
   3042       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
   3043       stub1.GetCode();
   3044     }
   3045   }
   3046 }
   3047 
   3048 void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   3049   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
   3050       isolate);
   3051   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
   3052       isolate);
   3053   ArrayNArgumentsConstructorStub stub(isolate);
   3054   stub.GetCode();
   3055   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   3056   for (int i = 0; i < 2; i++) {
   3057     // For internal arrays we only need a few things.
   3058     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
   3059     stubh1.GetCode();
   3060     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
   3061     stubh2.GetCode();
   3062   }
   3063 }
   3064 
   3065 
   3066 void ArrayConstructorStub::GenerateDispatchToArrayStub(
   3067     MacroAssembler* masm,
   3068     AllocationSiteOverrideMode mode) {
   3069   Label not_zero_case, not_one_case;
   3070   __ And(at, a0, a0);
   3071   __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
   3072   CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   3073 
   3074   __ bind(&not_zero_case);
   3075   __ Branch(&not_one_case, gt, a0, Operand(1));
   3076   CreateArrayDispatchOneArgument(masm, mode);
   3077 
   3078   __ bind(&not_one_case);
   3079   ArrayNArgumentsConstructorStub stub(masm->isolate());
   3080   __ TailCallStub(&stub);
   3081 }
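        // In outline, the dispatch above is:
        //   if (argc == 0)       tail-call ArrayNoArgumentConstructorStub
        //   else if (argc == 1)  CreateArrayDispatchOneArgument
        //   else                 tail-call ArrayNArgumentsConstructorStub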
   3082 
   3083 
   3084 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   3085   // ----------- S t a t e -------------
   3086   //  -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
   3087   //  -- a1 : constructor
   3088   //  -- a2 : AllocationSite or undefined
   3089   //  -- a3 : Original constructor
   3090   //  -- sp[0] : last argument
   3091   // -----------------------------------
   3092 
   3093   if (FLAG_debug_code) {
   3094     // The array construct code is only set for the global and natives
   3095     // builtin Array functions, which always have maps.
   3096 
   3097     // Initial map for the builtin Array function should be a map.
   3098     __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
   3099     // The Smi test below catches both a NULL pointer and a Smi.
   3100     __ SmiTst(t0, at);
   3101     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
   3102         at, Operand(zero_reg));
   3103     __ GetObjectType(t0, t0, t1);
   3104     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
   3105         t1, Operand(MAP_TYPE));
   3106 
   3107     // We should either have undefined in a2 or a valid AllocationSite
   3108     __ AssertUndefinedOrAllocationSite(a2, t0);
   3109   }
   3110 
   3111   // Enter the context of the Array function.
   3112   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   3113 
   3114   Label subclassing;
   3115   __ Branch(&subclassing, ne, a1, Operand(a3));
   3116 
   3117   Label no_info;
   3118   // Get the elements kind and case on that.
   3119   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   3120   __ Branch(&no_info, eq, a2, Operand(at));
   3121 
   3122   __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
   3123   __ SmiUntag(a3);
   3124   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   3125   __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
   3126   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
   3127 
   3128   __ bind(&no_info);
   3129   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
   3130 
   3131   // Subclassing.
   3132   __ bind(&subclassing);
   3133   __ Lsa(at, sp, a0, kPointerSizeLog2);
   3134   __ sw(a1, MemOperand(at));
   3135   __ li(at, Operand(3));
   3136   __ addu(a0, a0, at);
   3137   __ Push(a3, a2);
   3138   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
   3139 }
   3140 
   3141 
   3142 void InternalArrayConstructorStub::GenerateCase(
   3143     MacroAssembler* masm, ElementsKind kind) {
   3144 
   3145   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   3146   __ TailCallStub(&stub0, lo, a0, Operand(1));
   3147 
   3148   ArrayNArgumentsConstructorStub stubN(isolate());
   3149   __ TailCallStub(&stubN, hi, a0, Operand(1));
   3150 
   3151   if (IsFastPackedElementsKind(kind)) {
   3152     // We might need to create a holey array;
   3153     // look at the first argument.
   3154     __ lw(at, MemOperand(sp, 0));
   3155 
   3156     InternalArraySingleArgumentConstructorStub
   3157         stub1_holey(isolate(), GetHoleyElementsKind(kind));
   3158     __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
   3159   }
   3160 
   3161   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
   3162   __ TailCallStub(&stub1);
   3163 }
   3164 
   3165 
   3166 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   3167   // ----------- S t a t e -------------
   3168   //  -- a0 : argc
   3169   //  -- a1 : constructor
   3170   //  -- sp[0] : return address
   3171   //  -- sp[4] : last argument
   3172   // -----------------------------------
   3173 
   3174   if (FLAG_debug_code) {
   3175     // The array construct code is only set for the global and natives
   3176     // builtin Array functions, which always have maps.
   3177 
   3178     // Initial map for the builtin Array function should be a map.
   3179     __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
   3180     // The Smi test below catches both a NULL pointer and a Smi.
   3181     __ SmiTst(a3, at);
   3182     __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
   3183         at, Operand(zero_reg));
   3184     __ GetObjectType(a3, a3, t0);
   3185     __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
   3186         t0, Operand(MAP_TYPE));
   3187   }
   3188 
   3189   // Figure out the right elements kind.
   3190   __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
   3191 
   3192   // Load the map's "bit field 2" into a3. We only need the first byte,
   3193   // but the following bit field extraction takes care of that anyway.
   3194   __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
   3195   // Retrieve elements_kind from bit field 2.
   3196   __ DecodeField<Map::ElementsKindBits>(a3);
   3197 
   3198   if (FLAG_debug_code) {
   3199     Label done;
   3200     __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
   3201     __ Assert(
   3202         eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
   3203         a3, Operand(FAST_HOLEY_ELEMENTS));
   3204     __ bind(&done);
   3205   }
   3206 
   3207   Label fast_elements_case;
   3208   __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
   3209   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
   3210 
   3211   __ bind(&fast_elements_case);
   3212   GenerateCase(masm, FAST_ELEMENTS);
   3213 }
   3214 
   3215 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   3216   return ref0.address() - ref1.address();
   3217 }
   3218 
   3219 
   3220 // Calls an API function.  Allocates a HandleScope, extracts the returned
   3221 // value from the handle, and propagates exceptions.  Restores the context.
   3222 // stack_space - space to be unwound on exit (includes the call's JS argument
   3223 // space and the additional space allocated for the fast call).
   3224 static void CallApiFunctionAndReturn(
   3225     MacroAssembler* masm, Register function_address,
   3226     ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
   3227     MemOperand return_value_operand, MemOperand* context_restore_operand) {
   3228   Isolate* isolate = masm->isolate();
   3229   ExternalReference next_address =
   3230       ExternalReference::handle_scope_next_address(isolate);
   3231   const int kNextOffset = 0;
   3232   const int kLimitOffset = AddressOffset(
   3233       ExternalReference::handle_scope_limit_address(isolate), next_address);
   3234   const int kLevelOffset = AddressOffset(
   3235       ExternalReference::handle_scope_level_address(isolate), next_address);
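          // The limit and level fields are addressed relative to next_address via
          // the offsets computed above, so a single base register (s3 below)
          // reaches all three HandleScope fields.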
   3236 
   3237   DCHECK(function_address.is(a1) || function_address.is(a2));
   3238 
   3239   Label profiler_disabled;
   3240   Label end_profiler_check;
   3241   __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
   3242   __ lb(t9, MemOperand(t9, 0));
   3243   __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
   3244 
   3245   // Additional parameter is the address of the actual callback.
   3246   __ li(t9, Operand(thunk_ref));
   3247   __ jmp(&end_profiler_check);
   3248 
   3249   __ bind(&profiler_disabled);
   3250   __ mov(t9, function_address);
   3251   __ bind(&end_profiler_check);
   3252 
   3253   // Allocate HandleScope in callee-save registers.
   3254   __ li(s3, Operand(next_address));
   3255   __ lw(s0, MemOperand(s3, kNextOffset));
   3256   __ lw(s1, MemOperand(s3, kLimitOffset));
   3257   __ lw(s2, MemOperand(s3, kLevelOffset));
   3258   __ Addu(s2, s2, Operand(1));
   3259   __ sw(s2, MemOperand(s3, kLevelOffset));
   3260 
   3261   if (FLAG_log_timer_events) {
   3262     FrameScope frame(masm, StackFrame::MANUAL);
   3263     __ PushSafepointRegisters();
   3264     __ PrepareCallCFunction(1, a0);
   3265     __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
   3266     __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
   3267                      1);
   3268     __ PopSafepointRegisters();
   3269   }
   3270 
   3271   // The native call returns to the DirectCEntry stub, which redirects to the
   3272   // return address pushed on the stack (the code could have moved after GC).
   3273   // The DirectCEntry stub itself is generated early and never moves.
   3274   DirectCEntryStub stub(isolate);
   3275   stub.GenerateCall(masm, t9);
   3276 
   3277   if (FLAG_log_timer_events) {
   3278     FrameScope frame(masm, StackFrame::MANUAL);
   3279     __ PushSafepointRegisters();
   3280     __ PrepareCallCFunction(1, a0);
   3281     __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
   3282     __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
   3283                      1);
   3284     __ PopSafepointRegisters();
   3285   }
   3286 
   3287   Label promote_scheduled_exception;
   3288   Label delete_allocated_handles;
   3289   Label leave_exit_frame;
   3290   Label return_value_loaded;
   3291 
   3292   // Load value from ReturnValue.
   3293   __ lw(v0, return_value_operand);
   3294   __ bind(&return_value_loaded);
   3295 
   3296   // No more valid handles (the result handle was the last one). Restore
   3297   // previous handle scope.
   3298   __ sw(s0, MemOperand(s3, kNextOffset));
   3299   if (__ emit_debug_code()) {
   3300     __ lw(a1, MemOperand(s3, kLevelOffset));
   3301     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
   3302   }
   3303   __ Subu(s2, s2, Operand(1));
   3304   __ sw(s2, MemOperand(s3, kLevelOffset));
   3305   __ lw(at, MemOperand(s3, kLimitOffset));
   3306   __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
   3307 
   3308   // Leave the API exit frame.
   3309   __ bind(&leave_exit_frame);
   3310 
   3311   bool restore_context = context_restore_operand != NULL;
   3312   if (restore_context) {
   3313     __ lw(cp, *context_restore_operand);
   3314   }
   3315   if (stack_space_offset != kInvalidStackOffset) {
   3316     // The ExitFrame contains four MIPS argument slots after the
   3317     // DirectCEntryStub call, so this must be accounted for.
   3318     __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
   3319   } else {
   3320     __ li(s0, Operand(stack_space));
   3321   }
   3322   __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
   3323                     stack_space_offset != kInvalidStackOffset);
   3324 
   3325   // Check if the function scheduled an exception.
   3326   __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
   3327   __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
   3328   __ lw(t1, MemOperand(at));
   3329   __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
   3330 
   3331   __ Ret();
   3332 
   3333   // Re-throw by promoting a scheduled exception.
   3334   __ bind(&promote_scheduled_exception);
   3335   __ TailCallRuntime(Runtime::kPromoteScheduledException);
   3336 
   3337   // HandleScope limit has changed. Delete allocated extensions.
   3338   __ bind(&delete_allocated_handles);
   3339   __ sw(s1, MemOperand(s3, kLimitOffset));
   3340   __ mov(s0, v0);
   3341   __ mov(a0, v0);
   3342   __ PrepareCallCFunction(1, s1);
   3343   __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
   3344   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
   3345                    1);
   3346   __ mov(v0, s0);
   3347   __ jmp(&leave_exit_frame);
   3348 }
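        // Rough outline of the return path above (a sketch, ignoring the timer
        // logging and the profiler thunk):
        //   result = *return_value_operand;
        //   handle_scope.next = saved_next;  --handle_scope.level;
        //   if (handle_scope.limit != saved_limit) DeleteExtensions(isolate);
        //   LeaveExitFrame(stack_space);
        //   if (scheduled_exception != the_hole) PromoteScheduledException();
        //   return result;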
   3349 
   3350 void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   3351   // ----------- S t a t e -------------
   3352   //  -- a0                  : callee
   3353   //  -- t0                  : call_data
   3354   //  -- a2                  : holder
   3355   //  -- a1                  : api_function_address
   3356   //  -- cp                  : context
   3357   //  --
   3358   //  -- sp[0]               : last argument
   3359   //  -- ...
   3360   //  -- sp[(argc - 1)* 4]   : first argument
   3361   //  -- sp[argc * 4]        : receiver
   3362   // -----------------------------------
   3363 
   3364   Register callee = a0;
   3365   Register call_data = t0;
   3366   Register holder = a2;
   3367   Register api_function_address = a1;
   3368   Register context = cp;
   3369 
   3370   typedef FunctionCallbackArguments FCA;
   3371 
   3372   STATIC_ASSERT(FCA::kContextSaveIndex == 6);
   3373   STATIC_ASSERT(FCA::kCalleeIndex == 5);
   3374   STATIC_ASSERT(FCA::kDataIndex == 4);
   3375   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
   3376   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   3377   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   3378   STATIC_ASSERT(FCA::kHolderIndex == 0);
   3379   STATIC_ASSERT(FCA::kNewTargetIndex == 7);
   3380   STATIC_ASSERT(FCA::kArgsLength == 8);
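          // After the pushes below, the implicit arguments sit on the stack in
          // this order (sp points at the value pushed last, i.e. index 0):
          //   sp[7 * kPointerSize]: new target (undefined)
          //   sp[6 * kPointerSize]: context save
          //   sp[5 * kPointerSize]: callee
          //   sp[4 * kPointerSize]: call data
          //   sp[3 * kPointerSize]: return value
          //   sp[2 * kPointerSize]: return value default
          //   sp[1 * kPointerSize]: isolate
          //   sp[0 * kPointerSize]: holder
          // matching the FCA::k*Index asserts above.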
   3381 
   3382   // new target
   3383   __ PushRoot(Heap::kUndefinedValueRootIndex);
   3384 
   3385   // Save context, callee and call data.
   3386   __ Push(context, callee, call_data);
   3387   if (!is_lazy()) {
   3388     // Load context from callee.
   3389     __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   3390   }
   3391 
   3392   Register scratch = call_data;
   3393   if (!call_data_undefined()) {
   3394     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3395   }
   3396   // Push return value and default return value.
   3397   __ Push(scratch, scratch);
   3398   __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
   3399   // Push isolate and holder.
   3400   __ Push(scratch, holder);
   3401 
   3402   // Prepare arguments.
   3403   __ mov(scratch, sp);
   3404 
   3405   // Allocate the FunctionCallbackInfo structure in the arguments' space,
   3406   // since it is not controlled by the GC.
   3407   const int kApiStackSpace = 3;
   3408 
   3409   FrameScope frame_scope(masm, StackFrame::MANUAL);
   3410   __ EnterExitFrame(false, kApiStackSpace);
   3411 
   3412   DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
   3413   // a0 = FunctionCallbackInfo&
   3414   // The arguments area starts just after the return address.
   3415   __ Addu(a0, sp, Operand(1 * kPointerSize));
   3416   // FunctionCallbackInfo::implicit_args_
   3417   __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
   3418   // FunctionCallbackInfo::values_
   3419   __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
   3420   __ sw(at, MemOperand(a0, 1 * kPointerSize));
   3421   // FunctionCallbackInfo::length_ = argc
   3422   __ li(at, Operand(argc()));
   3423   __ sw(at, MemOperand(a0, 2 * kPointerSize));
   3424 
   3425   ExternalReference thunk_ref =
   3426       ExternalReference::invoke_function_callback(masm->isolate());
   3427 
   3428   AllowExternalCallThatCantCauseGC scope(masm);
   3429   MemOperand context_restore_operand(
   3430       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   3431   // Store callbacks return the first JS argument (the stored value).
   3432   int return_value_offset = 0;
   3433   if (is_store()) {
   3434     return_value_offset = 2 + FCA::kArgsLength;
   3435   } else {
   3436     return_value_offset = 2 + FCA::kReturnValueOffset;
   3437   }
   3438   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   3439   int stack_space = 0;
   3440   int32_t stack_space_offset = 3 * kPointerSize;
   3441   stack_space = argc() + FCA::kArgsLength + 1;
   3442   // TODO(adamk): Why are we clobbering this immediately?
   3443   stack_space_offset = kInvalidStackOffset;
   3444   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
   3445                            stack_space_offset, return_value_operand,
   3446                            &context_restore_operand);
   3447 }
   3448 
   3449 
   3450 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   3451   // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
   3452   // property name below the exit frame, to make the GC aware of them.
   3453   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
   3454   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
   3455   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
   3456   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
   3457   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
   3458   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
   3459   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
   3460   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
   3461 
   3462   Register receiver = ApiGetterDescriptor::ReceiverRegister();
   3463   Register holder = ApiGetterDescriptor::HolderRegister();
   3464   Register callback = ApiGetterDescriptor::CallbackRegister();
   3465   Register scratch = t0;
   3466   DCHECK(!AreAliased(receiver, holder, callback, scratch));
   3467 
   3468   Register api_function_address = a2;
   3469 
   3470   // Here and below +1 is for name() pushed after the args_ array.
   3471   typedef PropertyCallbackArguments PCA;
   3472   __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
   3473   __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
   3474   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
   3475   __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
   3476   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3477   __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
   3478   __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
   3479                                     kPointerSize));
   3480   __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
   3481   __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
   3482   __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
   3483   // should_throw_on_error -> false
   3484   DCHECK(Smi::kZero == nullptr);
   3485   __ sw(zero_reg,
   3486         MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
   3487   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
   3488   __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
   3489 
   3490   // v8::PropertyCallbackInfo::args_ array and name handle.
   3491   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
   3492 
   3493   // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
   3494   __ mov(a0, sp);                              // a0 = Handle<Name>
   3495   __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
   3496 
   3497   const int kApiStackSpace = 1;
   3498   FrameScope frame_scope(masm, StackFrame::MANUAL);
   3499   __ EnterExitFrame(false, kApiStackSpace);
   3500 
   3501   // Create a v8::PropertyCallbackInfo object on the stack and initialize
   3502   // its args_ field.
   3503   __ sw(a1, MemOperand(sp, 1 * kPointerSize));
   3504   __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = v8::PropertyCallbackInfo&
   3505 
   3506   ExternalReference thunk_ref =
   3507       ExternalReference::invoke_accessor_getter_callback(isolate());
   3508 
   3509   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
   3510   __ lw(api_function_address,
   3511         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
   3512 
   3513   // +3 is to skip the prologue, the return address and the name handle.
   3514   MemOperand return_value_operand(
   3515       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   3516   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
   3517                            kStackUnwindSpace, kInvalidStackOffset,
   3518                            return_value_operand, NULL);
   3519 }
   3520 
   3521 #undef __
   3522 
   3523 }  // namespace internal
   3524 }  // namespace v8
   3525 
   3526 #endif  // V8_TARGET_ARCH_MIPS
   3527