      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #if V8_TARGET_ARCH_PPC
      6 
      7 #include "src/base/bits.h"
      8 #include "src/bootstrapper.h"
      9 #include "src/code-stubs.h"
     10 #include "src/codegen.h"
     11 #include "src/ic/handler-compiler.h"
     12 #include "src/ic/ic.h"
     13 #include "src/ic/stub-cache.h"
     14 #include "src/isolate.h"
     15 #include "src/ppc/code-stubs-ppc.h"
     16 #include "src/regexp/jsregexp.h"
     17 #include "src/regexp/regexp-macro-assembler.h"
     18 #include "src/runtime/runtime.h"
     19 
     20 namespace v8 {
     21 namespace internal {
     22 
     23 
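         // Shared helper for the Array constructor stub descriptors below.  With a
         // non-zero constant_stack_parameter_count the descriptor records r3 as the
         // register that holds the run-time argument count (the -1 used by the
         // N-arguments stubs presumably meaning "variable count"); the deopt handler
         // points at the corresponding runtime function.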
     24 static void InitializeArrayConstructorDescriptor(
     25     Isolate* isolate, CodeStubDescriptor* descriptor,
     26     int constant_stack_parameter_count) {
     27   Address deopt_handler =
     28       Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
     29 
     30   if (constant_stack_parameter_count == 0) {
     31     descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
     32                            JS_FUNCTION_STUB_MODE);
     33   } else {
     34     descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
     35                            JS_FUNCTION_STUB_MODE);
     36   }
     37 }
     38 
     39 
     40 static void InitializeInternalArrayConstructorDescriptor(
     41     Isolate* isolate, CodeStubDescriptor* descriptor,
     42     int constant_stack_parameter_count) {
     43   Address deopt_handler =
     44       Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
     45 
     46   if (constant_stack_parameter_count == 0) {
     47     descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
     48                            JS_FUNCTION_STUB_MODE);
     49   } else {
     50     descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
     51                            JS_FUNCTION_STUB_MODE);
     52   }
     53 }
     54 
     55 
     56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
     57     CodeStubDescriptor* descriptor) {
     58   InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
     59 }
     60 
     61 
     62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     63     CodeStubDescriptor* descriptor) {
     64   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
     65 }
     66 
     67 
     68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
     69     CodeStubDescriptor* descriptor) {
     70   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
     71 }
     72 
     73 
     74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
     75     CodeStubDescriptor* descriptor) {
     76   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
     77 }
     78 
     79 
     80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     81     CodeStubDescriptor* descriptor) {
     82   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
     83 }
     84 
     85 
     86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
     87     CodeStubDescriptor* descriptor) {
     88   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
     89 }
     90 
     91 
     92 #define __ ACCESS_MASM(masm)
     93 
     94 
     95 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
     96                                           Condition cond, Strength strength);
     97 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
     98                                     Register rhs, Label* lhs_not_nan,
     99                                     Label* slow, bool strict);
    100 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
    101                                            Register rhs);
    102 
    103 
    104 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
    105                                                ExternalReference miss) {
    106   // Update the static counter each time a new code stub is generated.
    107   isolate()->counters()->code_stubs()->Increment();
    108 
    109   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
    110   int param_count = descriptor.GetRegisterParameterCount();
    111   {
    112     // Call the runtime system in a fresh internal frame.
    113     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    114     DCHECK(param_count == 0 ||
    115            r3.is(descriptor.GetRegisterParameter(param_count - 1)));
    116     // Push arguments
    117     for (int i = 0; i < param_count; ++i) {
    118       __ push(descriptor.GetRegisterParameter(i));
    119     }
    120     __ CallExternalReference(miss, param_count);
    121   }
    122 
    123   __ Ret();
    124 }
    125 
    126 
    127 void DoubleToIStub::Generate(MacroAssembler* masm) {
    128   Label out_of_range, only_low, negate, done, fastpath_done;
    129   Register input_reg = source();
    130   Register result_reg = destination();
    131   DCHECK(is_truncating());
    132 
    133   int double_offset = offset();
    134 
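           // Slow-path outline (used when the ConvertDoubleToInt64 fast path below
           // is skipped or overflows): load the two 32-bit words of the IEEE-754
           // double, extract the biased exponent, shift the mantissa (with its
           // implicit leading 1) into place to form the truncated 32-bit result, and
           // negate it if the sign bit of the high word was set.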
    135   // Immediate values for this stub fit in instructions, so it's safe to use ip.
    136   Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
    137   Register scratch_low =
    138       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
    139   Register scratch_high =
    140       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
    141   DoubleRegister double_scratch = kScratchDoubleReg;
    142 
    143   __ push(scratch);
    144   // Account for saved regs if input is sp.
    145   if (input_reg.is(sp)) double_offset += kPointerSize;
    146 
    147   if (!skip_fastpath()) {
    148     // Load double input.
    149     __ lfd(double_scratch, MemOperand(input_reg, double_offset));
    150 
    151     // Do fast-path convert from double to int.
    152     __ ConvertDoubleToInt64(double_scratch,
    153 #if !V8_TARGET_ARCH_PPC64
    154                             scratch,
    155 #endif
    156                             result_reg, d0);
    157 
    158 // Test for overflow
    159 #if V8_TARGET_ARCH_PPC64
    160     __ TestIfInt32(result_reg, r0);
    161 #else
    162     __ TestIfInt32(scratch, result_reg, r0);
    163 #endif
    164     __ beq(&fastpath_done);
    165   }
    166 
    167   __ Push(scratch_high, scratch_low);
    168   // Account for saved regs if input is sp.
    169   if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
    170 
    171   __ lwz(scratch_high,
    172          MemOperand(input_reg, double_offset + Register::kExponentOffset));
    173   __ lwz(scratch_low,
    174          MemOperand(input_reg, double_offset + Register::kMantissaOffset));
    175 
    176   __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
     177   // Load scratch with exponent - 1. This is faster than loading
     178   // with exponent because Bias + 1 = 1024, which is a *PPC* immediate value.
    179   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
    180   __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
     181   // If the exponent is greater than or equal to 84, the 32 least significant
     182   // bits are zeros (with only 52 mantissa bits, a value >= 2^84 has no bits
     183   // set below bit 32), so the result is 0.
    184   // Compare exponent with 84 (compare exponent - 1 with 83).
    185   __ cmpi(scratch, Operand(83));
    186   __ bge(&out_of_range);
    187 
    188   // If we reach this code, 31 <= exponent <= 83.
    189   // So, we don't have to handle cases where 0 <= exponent <= 20 for
    190   // which we would need to shift right the high part of the mantissa.
    191   // Scratch contains exponent - 1.
    192   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
    193   __ subfic(scratch, scratch, Operand(51));
    194   __ cmpi(scratch, Operand::Zero());
    195   __ ble(&only_low);
    196   // 21 <= exponent <= 51, shift scratch_low and scratch_high
    197   // to generate the result.
    198   __ srw(scratch_low, scratch_low, scratch);
    199   // Scratch contains: 52 - exponent.
     200   // We need: exponent - 20.
    201   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
    202   __ subfic(scratch, scratch, Operand(32));
    203   __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
    204   // Set the implicit 1 before the mantissa part in scratch_high.
    205   STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
    206   __ oris(result_reg, result_reg,
    207           Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
    208   __ slw(r0, result_reg, scratch);
    209   __ orx(result_reg, scratch_low, r0);
    210   __ b(&negate);
    211 
    212   __ bind(&out_of_range);
    213   __ mov(result_reg, Operand::Zero());
    214   __ b(&done);
    215 
    216   __ bind(&only_low);
    217   // 52 <= exponent <= 83, shift only scratch_low.
    218   // On entry, scratch contains: 52 - exponent.
    219   __ neg(scratch, scratch);
    220   __ slw(result_reg, scratch_low, scratch);
    221 
    222   __ bind(&negate);
     223   // If the input was positive, scratch_high ASR 31 equals 0 and
     224   // scratch_high LSR 31 equals 0.
     225   // New result = (result eor 0) + 0 = result.
     226   // If the input was negative, we have to negate the result.
     227   // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
     228   // New result = (result eor 0xffffffff) + 1 = 0 - result.
    229   __ srawi(r0, scratch_high, 31);
    230 #if V8_TARGET_ARCH_PPC64
    231   __ srdi(r0, r0, Operand(32));
    232 #endif
    233   __ xor_(result_reg, result_reg, r0);
    234   __ srwi(r0, scratch_high, Operand(31));
    235   __ add(result_reg, result_reg, r0);
    236 
    237   __ bind(&done);
    238   __ Pop(scratch_high, scratch_low);
    239 
    240   __ bind(&fastpath_done);
    241   __ pop(scratch);
    242 
    243   __ Ret();
    244 }
    245 
    246 
    247 // Handle the case where the lhs and rhs are the same object.
    248 // Equality is almost reflexive (everything but NaN), so this is a test
    249 // for "identity and not NaN".
    250 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
    251                                           Condition cond, Strength strength) {
    252   Label not_identical;
    253   Label heap_number, return_equal;
    254   __ cmp(r3, r4);
    255   __ bne(&not_identical);
    256 
     257   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
     258   // so we do the second best thing: test it ourselves.
     259   // The operands are identical and not both Smis, so (being the same value)
     260   // neither of them is a Smi.  If it's not a heap number, then return equal.
    261   if (cond == lt || cond == gt) {
    262     // Call runtime on identical JSObjects.
    263     __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
    264     __ bge(slow);
    265     // Call runtime on identical symbols since we need to throw a TypeError.
    266     __ cmpi(r7, Operand(SYMBOL_TYPE));
    267     __ beq(slow);
    268     // Call runtime on identical SIMD values since we must throw a TypeError.
    269     __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
    270     __ beq(slow);
    271     if (is_strong(strength)) {
    272       // Call the runtime on anything that is converted in the semantics, since
    273       // we need to throw a TypeError. Smis have already been ruled out.
    274       __ cmpi(r7, Operand(HEAP_NUMBER_TYPE));
    275       __ beq(&return_equal);
    276       __ andi(r0, r7, Operand(kIsNotStringMask));
    277       __ bne(slow, cr0);
    278     }
    279   } else {
    280     __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
    281     __ beq(&heap_number);
    282     // Comparing JS objects with <=, >= is complicated.
    283     if (cond != eq) {
    284       __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
    285       __ bge(slow);
    286       // Call runtime on identical symbols since we need to throw a TypeError.
    287       __ cmpi(r7, Operand(SYMBOL_TYPE));
    288       __ beq(slow);
    289       // Call runtime on identical SIMD values since we must throw a TypeError.
    290       __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
    291       __ beq(slow);
    292       if (is_strong(strength)) {
    293         // Call the runtime on anything that is converted in the semantics,
    294         // since we need to throw a TypeError. Smis and heap numbers have
    295         // already been ruled out.
    296         __ andi(r0, r7, Operand(kIsNotStringMask));
    297         __ bne(slow, cr0);
    298       }
    299       // Normally here we fall through to return_equal, but undefined is
    300       // special: (undefined == undefined) == true, but
    301       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
    302       if (cond == le || cond == ge) {
    303         __ cmpi(r7, Operand(ODDBALL_TYPE));
    304         __ bne(&return_equal);
    305         __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
    306         __ cmp(r3, r5);
    307         __ bne(&return_equal);
    308         if (cond == le) {
    309           // undefined <= undefined should fail.
    310           __ li(r3, Operand(GREATER));
    311         } else {
    312           // undefined >= undefined should fail.
    313           __ li(r3, Operand(LESS));
    314         }
    315         __ Ret();
    316       }
    317     }
    318   }
    319 
    320   __ bind(&return_equal);
    321   if (cond == lt) {
    322     __ li(r3, Operand(GREATER));  // Things aren't less than themselves.
    323   } else if (cond == gt) {
    324     __ li(r3, Operand(LESS));  // Things aren't greater than themselves.
    325   } else {
    326     __ li(r3, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
    327   }
    328   __ Ret();
    329 
    330   // For less and greater we don't have to check for NaN since the result of
    331   // x < x is false regardless.  For the others here is some code to check
    332   // for NaN.
    333   if (cond != lt && cond != gt) {
    334     __ bind(&heap_number);
    335     // It is a heap number, so return non-equal if it's NaN and equal if it's
    336     // not NaN.
    337 
    338     // The representation of NaN values has all exponent bits (52..62) set,
    339     // and not all mantissa bits (0..51) clear.
    340     // Read top bits of double representation (second word of value).
    341     __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
    342     // Test that exponent bits are all set.
    343     STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
    344     __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
    345     __ cmpli(r6, Operand(0x7ff));
    346     __ bne(&return_equal);
    347 
    348     // Shift out flag and all exponent bits, retaining only mantissa.
    349     __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
    350     // Or with all low-bits of mantissa.
    351     __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
    352     __ orx(r3, r6, r5);
    353     __ cmpi(r3, Operand::Zero());
    354     // For equal we already have the right value in r3:  Return zero (equal)
    355     // if all bits in mantissa are zero (it's an Infinity) and non-zero if
     356     // not (it's a NaN).  For <= and >= we need to load r3 with the failing
    357     // value if it's a NaN.
    358     if (cond != eq) {
    359       if (CpuFeatures::IsSupported(ISELECT)) {
    360         __ li(r4, Operand((cond == le) ? GREATER : LESS));
    361         __ isel(eq, r3, r3, r4);
    362       } else {
    363         // All-zero means Infinity means equal.
    364         __ Ret(eq);
    365         if (cond == le) {
    366           __ li(r3, Operand(GREATER));  // NaN <= NaN should fail.
    367         } else {
    368           __ li(r3, Operand(LESS));  // NaN >= NaN should fail.
    369         }
    370       }
    371     }
    372     __ Ret();
    373   }
    374   // No fall through here.
    375 
    376   __ bind(&not_identical);
    377 }
    378 
    379 
    380 // See comment at call site.
    381 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
    382                                     Register rhs, Label* lhs_not_nan,
    383                                     Label* slow, bool strict) {
    384   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
    385 
    386   Label rhs_is_smi;
    387   __ JumpIfSmi(rhs, &rhs_is_smi);
    388 
    389   // Lhs is a Smi.  Check whether the rhs is a heap number.
    390   __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
    391   if (strict) {
     392     // If rhs is not a number and lhs is a Smi then strict equality cannot
     393     // succeed.  Return non-equal.
     394     // If rhs is r3 then there is already a non-zero value in it.
    395     if (!rhs.is(r3)) {
    396       Label skip;
    397       __ beq(&skip);
    398       __ mov(r3, Operand(NOT_EQUAL));
    399       __ Ret();
    400       __ bind(&skip);
    401     } else {
    402       __ Ret(ne);
    403     }
    404   } else {
    405     // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    406     // the runtime.
    407     __ bne(slow);
    408   }
    409 
    410   // Lhs is a smi, rhs is a number.
    411   // Convert lhs to a double in d7.
    412   __ SmiToDouble(d7, lhs);
    413   // Load the double from rhs, tagged HeapNumber r3, to d6.
    414   __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    415 
    416   // We now have both loaded as doubles but we can skip the lhs nan check
    417   // since it's a smi.
    418   __ b(lhs_not_nan);
    419 
    420   __ bind(&rhs_is_smi);
    421   // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
    422   __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
    423   if (strict) {
    424     // If lhs is not a number and rhs is a smi then strict equality cannot
    425     // succeed.  Return non-equal.
     426     // If lhs is r3 then there is already a non-zero value in it.
    427     if (!lhs.is(r3)) {
    428       Label skip;
    429       __ beq(&skip);
    430       __ mov(r3, Operand(NOT_EQUAL));
    431       __ Ret();
    432       __ bind(&skip);
    433     } else {
    434       __ Ret(ne);
    435     }
    436   } else {
    437     // Smi compared non-strictly with a non-smi non-heap-number.  Call
    438     // the runtime.
    439     __ bne(slow);
    440   }
    441 
    442   // Rhs is a smi, lhs is a heap number.
    443   // Load the double from lhs, tagged HeapNumber r4, to d7.
    444   __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    445   // Convert rhs to a double in d6.
    446   __ SmiToDouble(d6, rhs);
    447   // Fall through to both_loaded_as_doubles.
    448 }
    449 
    450 
    451 // See comment at call site.
    452 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
    453                                            Register rhs) {
    454   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
    455 
    456   // If either operand is a JS object or an oddball value, then they are
    457   // not equal since their pointers are different.
    458   // There is no test for undetectability in strict equality.
    459   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    460   Label first_non_object;
    461   // Get the type of the first operand into r5 and compare it with
    462   // FIRST_JS_RECEIVER_TYPE.
    463   __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
    464   __ blt(&first_non_object);
    465 
    466   // Return non-zero (r3 is not zero)
    467   Label return_not_equal;
    468   __ bind(&return_not_equal);
    469   __ Ret();
    470 
    471   __ bind(&first_non_object);
    472   // Check for oddballs: true, false, null, undefined.
    473   __ cmpi(r5, Operand(ODDBALL_TYPE));
    474   __ beq(&return_not_equal);
    475 
    476   __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
    477   __ bge(&return_not_equal);
    478 
    479   // Check for oddballs: true, false, null, undefined.
    480   __ cmpi(r6, Operand(ODDBALL_TYPE));
    481   __ beq(&return_not_equal);
    482 
    483   // Now that we have the types we might as well check for
    484   // internalized-internalized.
    485   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    486   __ orx(r5, r5, r6);
    487   __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    488   __ beq(&return_not_equal, cr0);
    489 }
    490 
    491 
    492 // See comment at call site.
    493 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
    494                                        Register rhs,
    495                                        Label* both_loaded_as_doubles,
    496                                        Label* not_heap_numbers, Label* slow) {
    497   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
    498 
    499   __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
    500   __ bne(not_heap_numbers);
    501   __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
    502   __ cmp(r5, r6);
    503   __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.
    504 
    505   // Both are heap numbers.  Load them up then jump to the code we have
    506   // for that.
    507   __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    508   __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    509 
    510   __ b(both_loaded_as_doubles);
    511 }
    512 
    513 
    514 // Fast negative check for internalized-to-internalized equality.
    515 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
    516                                                      Register lhs, Register rhs,
    517                                                      Label* possible_strings,
    518                                                      Label* not_both_strings) {
    519   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
    520 
    521   // r5 is object type of rhs.
    522   Label object_test;
    523   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    524   __ andi(r0, r5, Operand(kIsNotStringMask));
    525   __ bne(&object_test, cr0);
    526   __ andi(r0, r5, Operand(kIsNotInternalizedMask));
    527   __ bne(possible_strings, cr0);
    528   __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
    529   __ bge(not_both_strings);
    530   __ andi(r0, r6, Operand(kIsNotInternalizedMask));
    531   __ bne(possible_strings, cr0);
    532 
    533   // Both are internalized.  We already checked they weren't the same pointer
    534   // so they are not equal.
    535   __ li(r3, Operand(NOT_EQUAL));
    536   __ Ret();
    537 
    538   __ bind(&object_test);
    539   __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
    540   __ blt(not_both_strings);
    541   __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
    542   __ blt(not_both_strings);
    543   // If both objects are undetectable, they are equal. Otherwise, they
    544   // are not equal, since they are different objects and an object is not
    545   // equal to undefined.
    546   __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
    547   __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
    548   __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
    549   __ and_(r3, r5, r6);
    550   __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
    551   __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
    552   __ Ret();
    553 }
    554 
    555 
    556 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
    557                                          Register scratch,
    558                                          CompareICState::State expected,
    559                                          Label* fail) {
    560   Label ok;
    561   if (expected == CompareICState::SMI) {
    562     __ JumpIfNotSmi(input, fail);
    563   } else if (expected == CompareICState::NUMBER) {
    564     __ JumpIfSmi(input, &ok);
    565     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
    566                 DONT_DO_SMI_CHECK);
    567   }
    568   // We could be strict about internalized/non-internalized here, but as long as
    569   // hydrogen doesn't care, the stub doesn't have to care either.
    570   __ bind(&ok);
    571 }
    572 
    573 
     574 // On entry r3 and r4 are the values to be compared.
    575 // On exit r3 is 0, positive or negative to indicate the result of
    576 // the comparison.
    577 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
    578   Register lhs = r4;
    579   Register rhs = r3;
    580   Condition cc = GetCondition();
    581 
    582   Label miss;
    583   CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
    584   CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);
    585 
    586   Label slow;  // Call builtin.
    587   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
    588 
    589   Label not_two_smis, smi_done;
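           // Fast path: if both operands are Smis, untag them and subtract; the sign
           // of the difference (negative, zero, positive) is exactly the value the
           // caller expects in r3.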
    590   __ orx(r5, r4, r3);
    591   __ JumpIfNotSmi(r5, &not_two_smis);
    592   __ SmiUntag(r4);
    593   __ SmiUntag(r3);
    594   __ sub(r3, r4, r3);
    595   __ Ret();
    596   __ bind(&not_two_smis);
    597 
    598   // NOTICE! This code is only reached after a smi-fast-case check, so
    599   // it is certain that at least one operand isn't a smi.
    600 
    601   // Handle the case where the objects are identical.  Either returns the answer
    602   // or goes to slow.  Only falls through if the objects were not identical.
    603   EmitIdenticalObjectComparison(masm, &slow, cc, strength());
    604 
    605   // If either is a Smi (we know that not both are), then they can only
    606   // be strictly equal if the other is a HeapNumber.
    607   STATIC_ASSERT(kSmiTag == 0);
    608   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
    609   __ and_(r5, lhs, rhs);
    610   __ JumpIfNotSmi(r5, &not_smis);
    611   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
    612   // 1) Return the answer.
    613   // 2) Go to slow.
    614   // 3) Fall through to both_loaded_as_doubles.
    615   // 4) Jump to lhs_not_nan.
    616   // In cases 3 and 4 we have found out we were dealing with a number-number
    617   // comparison.  The double values of the numbers have been loaded
    618   // into d7 and d6.
    619   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
    620 
    621   __ bind(&both_loaded_as_doubles);
    622   // The arguments have been converted to doubles and stored in d6 and d7
    623   __ bind(&lhs_not_nan);
    624   Label no_nan;
    625   __ fcmpu(d7, d6);
    626 
    627   Label nan, equal, less_than;
    628   __ bunordered(&nan);
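           // With the ISELECT facility the three-way result can be materialized with
           // conditional selects instead of branches; otherwise use the branching
           // sequence below.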
    629   if (CpuFeatures::IsSupported(ISELECT)) {
    630     DCHECK(EQUAL == 0);
    631     __ li(r4, Operand(GREATER));
    632     __ li(r5, Operand(LESS));
    633     __ isel(eq, r3, r0, r4);
    634     __ isel(lt, r3, r5, r3);
    635     __ Ret();
    636   } else {
    637     __ beq(&equal);
    638     __ blt(&less_than);
    639     __ li(r3, Operand(GREATER));
    640     __ Ret();
    641     __ bind(&equal);
    642     __ li(r3, Operand(EQUAL));
    643     __ Ret();
    644     __ bind(&less_than);
    645     __ li(r3, Operand(LESS));
    646     __ Ret();
    647   }
    648 
    649   __ bind(&nan);
     650   // If one of the sides was a NaN then the comparison is unordered.  Load r3
     651   // with whatever it takes to make the comparison fail, since comparisons with
     652   // NaN always fail.
    653   if (cc == lt || cc == le) {
    654     __ li(r3, Operand(GREATER));
    655   } else {
    656     __ li(r3, Operand(LESS));
    657   }
    658   __ Ret();
    659 
    660   __ bind(&not_smis);
     661   // At this point we know we are dealing with two different objects,
     662   // and neither of them is a Smi.  The objects are in rhs and lhs.
    663   if (strict()) {
    664     // This returns non-equal for some object types, or falls through if it
    665     // was not lucky.
    666     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
    667   }
    668 
    669   Label check_for_internalized_strings;
    670   Label flat_string_check;
     671   // Check for heap-number-heap-number comparison.  Can jump to slow case,
     672   // or load both doubles into d6 and d7 and jump to the code that handles
     673   // that case.  If the inputs are not doubles then jumps to
     674   // check_for_internalized_strings.
     675   // In this case r5 will contain the type of rhs.  Never falls through.
    676   EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
    677                              &check_for_internalized_strings,
    678                              &flat_string_check);
    679 
    680   __ bind(&check_for_internalized_strings);
    681   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
    682   // internalized strings.
    683   if (cc == eq && !strict()) {
    684     // Returns an answer for two internalized strings or two detectable objects.
    685     // Otherwise jumps to string case or not both strings case.
     686     // Assumes that r5 is the type of rhs on entry.
    687     EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
    688                                              &slow);
    689   }
    690 
    691   // Check for both being sequential one-byte strings,
    692   // and inline if that is the case.
    693   __ bind(&flat_string_check);
    694 
    695   __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);
    696 
    697   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
    698                       r6);
    699   if (cc == eq) {
    700     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
    701   } else {
    702     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
    703   }
    704   // Never falls through to here.
    705 
    706   __ bind(&slow);
    707 
    708   __ Push(lhs, rhs);
     709   // Figure out which native to call and set up the arguments.
    710   if (cc == eq) {
    711     __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
    712   } else {
    713     int ncr;  // NaN compare result
    714     if (cc == lt || cc == le) {
    715       ncr = GREATER;
    716     } else {
    717       DCHECK(cc == gt || cc == ge);  // remaining cases
    718       ncr = LESS;
    719     }
    720     __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
    721     __ push(r3);
    722 
    723     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    724     // tagged as a small integer.
    725     __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
    726                                              : Runtime::kCompare);
    727   }
    728 
    729   __ bind(&miss);
    730   GenerateMiss(masm);
    731 }
    732 
    733 
    734 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
    735   // We don't allow a GC during a store buffer overflow so there is no need to
    736   // store the registers in any particular way, but we do have to store and
    737   // restore them.
    738   __ mflr(r0);
    739   __ MultiPush(kJSCallerSaved | r0.bit());
    740   if (save_doubles()) {
    741     __ MultiPushDoubles(kCallerSavedDoubles);
    742   }
    743   const int argument_count = 1;
    744   const int fp_argument_count = 0;
    745   const Register scratch = r4;
    746 
    747   AllowExternalCallThatCantCauseGC scope(masm);
    748   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
    749   __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
    750   __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
    751                    argument_count);
    752   if (save_doubles()) {
    753     __ MultiPopDoubles(kCallerSavedDoubles);
    754   }
    755   __ MultiPop(kJSCallerSaved | r0.bit());
    756   __ mtlr(r0);
    757   __ Ret();
    758 }
    759 
    760 
    761 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
    762   __ PushSafepointRegisters();
    763   __ blr();
    764 }
    765 
    766 
    767 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
    768   __ PopSafepointRegisters();
    769   __ blr();
    770 }
    771 
    772 
    773 void MathPowStub::Generate(MacroAssembler* masm) {
    774   const Register base = r4;
    775   const Register exponent = MathPowTaggedDescriptor::exponent();
    776   DCHECK(exponent.is(r5));
    777   const Register heapnumbermap = r8;
    778   const Register heapnumber = r3;
    779   const DoubleRegister double_base = d1;
    780   const DoubleRegister double_exponent = d2;
    781   const DoubleRegister double_result = d3;
    782   const DoubleRegister double_scratch = d0;
    783   const Register scratch = r11;
    784   const Register scratch2 = r10;
    785 
    786   Label call_runtime, done, int_exponent;
    787   if (exponent_type() == ON_STACK) {
    788     Label base_is_smi, unpack_exponent;
    789     // The exponent and base are supplied as arguments on the stack.
    790     // This can only happen if the stub is called from non-optimized code.
    791     // Load input parameters from stack to double registers.
    792     __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
    793     __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
    794 
    795     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
    796 
    797     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    798     __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    799     __ cmp(scratch, heapnumbermap);
    800     __ bne(&call_runtime);
    801 
    802     __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    803     __ b(&unpack_exponent);
    804 
    805     __ bind(&base_is_smi);
    806     __ ConvertIntToDouble(scratch, double_base);
    807     __ bind(&unpack_exponent);
    808 
    809     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    810     __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    811     __ cmp(scratch, heapnumbermap);
    812     __ bne(&call_runtime);
    813 
    814     __ lfd(double_exponent,
    815            FieldMemOperand(exponent, HeapNumber::kValueOffset));
    816   } else if (exponent_type() == TAGGED) {
    817     // Base is already in double_base.
    818     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
    819 
    820     __ lfd(double_exponent,
    821            FieldMemOperand(exponent, HeapNumber::kValueOffset));
    822   }
    823 
    824   if (exponent_type() != INTEGER) {
    825     // Detect integer exponents stored as double.
    826     __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
    827                              double_scratch);
    828     __ beq(&int_exponent);
    829 
    830     if (exponent_type() == ON_STACK) {
    831       // Detect square root case.  Crankshaft detects constant +/-0.5 at
    832       // compile time and uses DoMathPowHalf instead.  We then skip this check
    833       // for non-constant cases of +/-0.5 as these hardly occur.
    834       Label not_plus_half, not_minus_inf1, not_minus_inf2;
    835 
    836       // Test for 0.5.
    837       __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
    838       __ fcmpu(double_exponent, double_scratch);
    839       __ bne(&not_plus_half);
    840 
    841       // Calculates square root of base.  Check for the special case of
    842       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
    843       __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
    844       __ fcmpu(double_base, double_scratch);
    845       __ bne(&not_minus_inf1);
    846       __ fneg(double_result, double_scratch);
    847       __ b(&done);
    848       __ bind(&not_minus_inf1);
    849 
    850       // Add +0 to convert -0 to +0.
    851       __ fadd(double_scratch, double_base, kDoubleRegZero);
    852       __ fsqrt(double_result, double_scratch);
    853       __ b(&done);
    854 
    855       __ bind(&not_plus_half);
    856       __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
    857       __ fcmpu(double_exponent, double_scratch);
    858       __ bne(&call_runtime);
    859 
    860       // Calculates square root of base.  Check for the special case of
    861       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
    862       __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
    863       __ fcmpu(double_base, double_scratch);
    864       __ bne(&not_minus_inf2);
    865       __ fmr(double_result, kDoubleRegZero);
    866       __ b(&done);
    867       __ bind(&not_minus_inf2);
    868 
    869       // Add +0 to convert -0 to +0.
    870       __ fadd(double_scratch, double_base, kDoubleRegZero);
    871       __ LoadDoubleLiteral(double_result, 1.0, scratch);
    872       __ fsqrt(double_scratch, double_scratch);
    873       __ fdiv(double_result, double_result, double_scratch);
    874       __ b(&done);
    875     }
    876 
    877     __ mflr(r0);
    878     __ push(r0);
    879     {
    880       AllowExternalCallThatCantCauseGC scope(masm);
    881       __ PrepareCallCFunction(0, 2, scratch);
    882       __ MovToFloatParameters(double_base, double_exponent);
    883       __ CallCFunction(
    884           ExternalReference::power_double_double_function(isolate()), 0, 2);
    885     }
    886     __ pop(r0);
    887     __ mtlr(r0);
    888     __ MovFromFloatResult(double_result);
    889     __ b(&done);
    890   }
    891 
    892   // Calculate power with integer exponent.
    893   __ bind(&int_exponent);
    894 
    895   // Get two copies of exponent in the registers scratch and exponent.
    896   if (exponent_type() == INTEGER) {
    897     __ mr(scratch, exponent);
    898   } else {
    899     // Exponent has previously been stored into scratch as untagged integer.
    900     __ mr(exponent, scratch);
    901   }
    902   __ fmr(double_scratch, double_base);  // Back up base.
    903   __ li(scratch2, Operand(1));
    904   __ ConvertIntToDouble(scratch2, double_result);
    905 
    906   // Get absolute value of exponent.
    907   __ cmpi(scratch, Operand::Zero());
    908   if (CpuFeatures::IsSupported(ISELECT)) {
    909     __ neg(scratch2, scratch);
    910     __ isel(lt, scratch, scratch2, scratch);
    911   } else {
    912     Label positive_exponent;
    913     __ bge(&positive_exponent);
    914     __ neg(scratch, scratch);
    915     __ bind(&positive_exponent);
    916   }
    917 
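           // Exponentiation by squaring: multiply the result by the current base
           // whenever the low bit of the exponent is set, square the base, and shift
           // the exponent right until it reaches zero.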
    918   Label while_true, no_carry, loop_end;
    919   __ bind(&while_true);
    920   __ andi(scratch2, scratch, Operand(1));
    921   __ beq(&no_carry, cr0);
    922   __ fmul(double_result, double_result, double_scratch);
    923   __ bind(&no_carry);
    924   __ ShiftRightArithImm(scratch, scratch, 1, SetRC);
    925   __ beq(&loop_end, cr0);
    926   __ fmul(double_scratch, double_scratch, double_scratch);
    927   __ b(&while_true);
    928   __ bind(&loop_end);
    929 
    930   __ cmpi(exponent, Operand::Zero());
    931   __ bge(&done);
    932 
    933   __ li(scratch2, Operand(1));
    934   __ ConvertIntToDouble(scratch2, double_scratch);
    935   __ fdiv(double_result, double_scratch, double_result);
    936   // Test whether result is zero.  Bail out to check for subnormal result.
    937   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
    938   __ fcmpu(double_result, kDoubleRegZero);
    939   __ bne(&done);
     940   // double_exponent may not contain the exponent value if the input was a
     941   // smi.  We set it with the exponent value before bailing out.
    942   __ ConvertIntToDouble(exponent, double_exponent);
    943 
    944   // Returning or bailing out.
    945   Counters* counters = isolate()->counters();
    946   if (exponent_type() == ON_STACK) {
    947     // The arguments are still on the stack.
    948     __ bind(&call_runtime);
    949     __ TailCallRuntime(Runtime::kMathPowRT);
    950 
     951     // The stub is called from non-optimized code, which expects the result
     952     // as a heap number in r3.
    953     __ bind(&done);
    954     __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
    955                           &call_runtime);
    956     __ stfd(double_result,
    957             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    958     DCHECK(heapnumber.is(r3));
    959     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    960     __ Ret(2);
    961   } else {
    962     __ mflr(r0);
    963     __ push(r0);
    964     {
    965       AllowExternalCallThatCantCauseGC scope(masm);
    966       __ PrepareCallCFunction(0, 2, scratch);
    967       __ MovToFloatParameters(double_base, double_exponent);
    968       __ CallCFunction(
    969           ExternalReference::power_double_double_function(isolate()), 0, 2);
    970     }
    971     __ pop(r0);
    972     __ mtlr(r0);
    973     __ MovFromFloatResult(double_result);
    974 
    975     __ bind(&done);
    976     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    977     __ Ret();
    978   }
    979 }
    980 
    981 
    982 bool CEntryStub::NeedsImmovableCode() { return true; }
    983 
    984 
    985 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
    986   CEntryStub::GenerateAheadOfTime(isolate);
    987   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
    988   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
    989   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
    990   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
    991   CreateWeakCellStub::GenerateAheadOfTime(isolate);
    992   BinaryOpICStub::GenerateAheadOfTime(isolate);
    993   StoreRegistersStateStub::GenerateAheadOfTime(isolate);
    994   RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
    995   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
    996   StoreFastElementStub::GenerateAheadOfTime(isolate);
    997   TypeofStub::GenerateAheadOfTime(isolate);
    998 }
    999 
   1000 
   1001 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
   1002   StoreRegistersStateStub stub(isolate);
   1003   stub.GetCode();
   1004 }
   1005 
   1006 
   1007 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
   1008   RestoreRegistersStateStub stub(isolate);
   1009   stub.GetCode();
   1010 }
   1011 
   1012 
   1013 void CodeStub::GenerateFPStubs(Isolate* isolate) {
   1014   // Generate if not already in cache.
   1015   SaveFPRegsMode mode = kSaveFPRegs;
   1016   CEntryStub(isolate, 1, mode).GetCode();
   1017   StoreBufferOverflowStub(isolate, mode).GetCode();
   1018   isolate->set_fp_stubs_generated(true);
   1019 }
   1020 
   1021 
   1022 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
   1023   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
   1024   stub.GetCode();
   1025 }
   1026 
   1027 
   1028 void CEntryStub::Generate(MacroAssembler* masm) {
   1029   // Called from JavaScript; parameters are on stack as if calling JS function.
   1030   // r3: number of arguments including receiver
   1031   // r4: pointer to builtin function
   1032   // fp: frame pointer  (restored after C call)
   1033   // sp: stack pointer  (restored as callee's sp after C call)
   1034   // cp: current context  (C callee-saved)
   1035   //
   1036   // If argv_in_register():
   1037   // r5: pointer to the first argument
   1038   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   1039 
   1040   __ mr(r15, r4);
   1041 
   1042   if (argv_in_register()) {
   1043     // Move argv into the correct register.
   1044     __ mr(r4, r5);
   1045   } else {
   1046     // Compute the argv pointer.
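             // That is, argv = sp + (argc - 1) * kPointerSize: the highest-addressed
             // slot of the argument area.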
   1047     __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
   1048     __ add(r4, r4, sp);
   1049     __ subi(r4, r4, Operand(kPointerSize));
   1050   }
   1051 
   1052   // Enter the exit frame that transitions from JavaScript to C++.
   1053   FrameScope scope(masm, StackFrame::MANUAL);
   1054 
   1055   // Need at least one extra slot for return address location.
   1056   int arg_stack_space = 1;
   1057 
   1058 // PPC LINUX ABI:
   1059 #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
   1060   // Pass buffer for return value on stack if necessary
   1061   if (result_size() > 1) {
   1062     DCHECK_EQ(2, result_size());
   1063     arg_stack_space += 2;
   1064   }
   1065 #endif
   1066 
   1067   __ EnterExitFrame(save_doubles(), arg_stack_space);
   1068 
    1069   // Store a copy of argc in a callee-saved register for later.
   1070   __ mr(r14, r3);
   1071 
   1072   // r3, r14: number of arguments including receiver  (C callee-saved)
   1073   // r4: pointer to the first argument
   1074   // r15: pointer to builtin function  (C callee-saved)
   1075 
   1076   // Result returned in registers or stack, depending on result size and ABI.
   1077 
   1078   Register isolate_reg = r5;
   1079 #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
   1080   if (result_size() > 1) {
    1081     // The return value is a 16-byte non-scalar value.
   1082     // Use frame storage reserved by calling function to pass return
   1083     // buffer as implicit first argument.
   1084     __ mr(r5, r4);
   1085     __ mr(r4, r3);
   1086     __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
   1087     isolate_reg = r6;
   1088   }
   1089 #endif
   1090 
   1091   // Call C built-in.
   1092   __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
   1093 
   1094   Register target = r15;
   1095 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
   1096   // Native AIX/PPC64 Linux use a function descriptor.
   1097   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
   1098   __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
   1099   target = ip;
   1100 #elif ABI_CALL_VIA_IP
   1101   __ Move(ip, r15);
   1102   target = ip;
   1103 #endif
   1104 
   1105   // To let the GC traverse the return address of the exit frames, we need to
   1106   // know where the return address is. The CEntryStub is unmovable, so
   1107   // we can store the address on the stack to be able to find it again and
   1108   // we never have to restore it, because it will not change.
   1109   Label after_call;
   1110   __ mov_label_addr(r0, &after_call);
   1111   __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
   1112   __ Call(target);
   1113   __ bind(&after_call);
   1114 
   1115 #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
   1116   // If return value is on the stack, pop it to registers.
   1117   if (result_size() > 1) {
   1118     __ LoadP(r4, MemOperand(r3, kPointerSize));
   1119     __ LoadP(r3, MemOperand(r3));
   1120   }
   1121 #endif
   1122 
   1123   // Check result for exception sentinel.
   1124   Label exception_returned;
   1125   __ CompareRoot(r3, Heap::kExceptionRootIndex);
   1126   __ beq(&exception_returned);
   1127 
   1128   // Check that there is no pending exception, otherwise we
   1129   // should have returned the exception sentinel.
   1130   if (FLAG_debug_code) {
   1131     Label okay;
   1132     ExternalReference pending_exception_address(
   1133         Isolate::kPendingExceptionAddress, isolate());
   1134 
   1135     __ mov(r5, Operand(pending_exception_address));
   1136     __ LoadP(r5, MemOperand(r5));
   1137     __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
    1138     // Cannot use Check here as it attempts to generate a call into the runtime.
   1139     __ beq(&okay);
   1140     __ stop("Unexpected pending exception");
   1141     __ bind(&okay);
   1142   }
   1143 
   1144   // Exit C frame and return.
   1145   // r3:r4: result
   1146   // sp: stack pointer
   1147   // fp: frame pointer
   1148   Register argc;
   1149   if (argv_in_register()) {
   1150     // We don't want to pop arguments so set argc to no_reg.
   1151     argc = no_reg;
   1152   } else {
   1153     // r14: still holds argc (callee-saved).
   1154     argc = r14;
   1155   }
   1156   __ LeaveExitFrame(save_doubles(), argc, true);
   1157   __ blr();
   1158 
   1159   // Handling of exception.
   1160   __ bind(&exception_returned);
   1161 
   1162   ExternalReference pending_handler_context_address(
   1163       Isolate::kPendingHandlerContextAddress, isolate());
   1164   ExternalReference pending_handler_code_address(
   1165       Isolate::kPendingHandlerCodeAddress, isolate());
   1166   ExternalReference pending_handler_offset_address(
   1167       Isolate::kPendingHandlerOffsetAddress, isolate());
   1168   ExternalReference pending_handler_fp_address(
   1169       Isolate::kPendingHandlerFPAddress, isolate());
   1170   ExternalReference pending_handler_sp_address(
   1171       Isolate::kPendingHandlerSPAddress, isolate());
   1172 
   1173   // Ask the runtime for help to determine the handler. This will set r3 to
   1174   // contain the current pending exception, don't clobber it.
   1175   ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
   1176                                  isolate());
   1177   {
   1178     FrameScope scope(masm, StackFrame::MANUAL);
   1179     __ PrepareCallCFunction(3, 0, r3);
   1180     __ li(r3, Operand::Zero());
   1181     __ li(r4, Operand::Zero());
   1182     __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
   1183     __ CallCFunction(find_handler, 3);
   1184   }
   1185 
   1186   // Retrieve the handler context, SP and FP.
   1187   __ mov(cp, Operand(pending_handler_context_address));
   1188   __ LoadP(cp, MemOperand(cp));
   1189   __ mov(sp, Operand(pending_handler_sp_address));
   1190   __ LoadP(sp, MemOperand(sp));
   1191   __ mov(fp, Operand(pending_handler_fp_address));
   1192   __ LoadP(fp, MemOperand(fp));
   1193 
   1194   // If the handler is a JS frame, restore the context to the frame. Note that
   1195   // the context will be set to (cp == 0) for non-JS frames.
   1196   Label skip;
   1197   __ cmpi(cp, Operand::Zero());
   1198   __ beq(&skip);
   1199   __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   1200   __ bind(&skip);
   1201 
   1202   // Compute the handler entry address and jump to it.
   1203   ConstantPoolUnavailableScope constant_pool_unavailable(masm);
   1204   __ mov(r4, Operand(pending_handler_code_address));
   1205   __ LoadP(r4, MemOperand(r4));
   1206   __ mov(r5, Operand(pending_handler_offset_address));
   1207   __ LoadP(r5, MemOperand(r5));
   1208   __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
   1209   if (FLAG_enable_embedded_constant_pool) {
   1210     __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4);
   1211   }
   1212   __ add(ip, r4, r5);
   1213   __ Jump(ip);
   1214 }
   1215 
   1216 
   1217 void JSEntryStub::Generate(MacroAssembler* masm) {
   1218   // r3: code entry
   1219   // r4: function
   1220   // r5: receiver
   1221   // r6: argc
   1222   // [sp+0]: argv
   1223 
   1224   Label invoke, handler_entry, exit;
   1225 
    1226   // Called from C
   1227   __ function_descriptor();
   1228 
   1229   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   1230 
   1231   // PPC LINUX ABI:
   1232   // preserve LR in pre-reserved slot in caller's frame
   1233   __ mflr(r0);
   1234   __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
   1235 
   1236   // Save callee saved registers on the stack.
   1237   __ MultiPush(kCalleeSaved);
   1238 
   1239   // Save callee-saved double registers.
   1240   __ MultiPushDoubles(kCalleeSavedDoubles);
   1241   // Set up the reserved register for 0.0.
   1242   __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
   1243 
   1244   // Push a frame with special values setup to mark it as an entry frame.
   1245   // r3: code entry
   1246   // r4: function
   1247   // r5: receiver
   1248   // r6: argc
   1249   // r7: argv
   1250   __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   1251   __ push(r0);
   1252   if (FLAG_enable_embedded_constant_pool) {
   1253     __ li(kConstantPoolRegister, Operand::Zero());
   1254     __ push(kConstantPoolRegister);
   1255   }
   1256   int marker = type();
   1257   __ LoadSmiLiteral(r0, Smi::FromInt(marker));
   1258   __ push(r0);
   1259   __ push(r0);
   1260   // Save copies of the top frame descriptor on the stack.
   1261   __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1262   __ LoadP(r0, MemOperand(r8));
   1263   __ push(r0);
   1264 
   1265   // Set up frame pointer for the frame to be pushed.
   1266   __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
   1267 
   1268   // If this is the outermost JS call, set js_entry_sp value.
   1269   Label non_outermost_js;
   1270   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
   1271   __ mov(r8, Operand(ExternalReference(js_entry_sp)));
   1272   __ LoadP(r9, MemOperand(r8));
   1273   __ cmpi(r9, Operand::Zero());
   1274   __ bne(&non_outermost_js);
   1275   __ StoreP(fp, MemOperand(r8));
   1276   __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
   1277   Label cont;
   1278   __ b(&cont);
   1279   __ bind(&non_outermost_js);
   1280   __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
   1281   __ bind(&cont);
   1282   __ push(ip);  // frame-type
   1283 
   1284   // Jump to a faked try block that does the invoke, with a faked catch
   1285   // block that sets the pending exception.
   1286   __ b(&invoke);
   1287 
   1288   __ bind(&handler_entry);
   1289   handler_offset_ = handler_entry.pos();
   1290   // Caught exception: Store result (exception) in the pending exception
   1291   // field in the JSEnv and return a failure sentinel.  Coming in here the
   1292   // fp will be invalid because the PushStackHandler below sets it to 0 to
   1293   // signal the existence of the JSEntry frame.
   1294   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1295                                        isolate())));
   1296 
   1297   __ StoreP(r3, MemOperand(ip));
   1298   __ LoadRoot(r3, Heap::kExceptionRootIndex);
   1299   __ b(&exit);
   1300 
   1301   // Invoke: Link this frame into the handler chain.
   1302   __ bind(&invoke);
   1303   // Must preserve r3-r7.
   1304   __ PushStackHandler();
   1305   // If an exception not caught by another handler occurs, this handler
   1306   // returns control to the code after the b(&invoke) above, which
   1307   // restores all kCalleeSaved registers (including cp and fp) to their
   1308   // saved values before returning a failure to C.
   1309 
   1310   // Clear any pending exceptions.
   1311   __ mov(r8, Operand(isolate()->factory()->the_hole_value()));
   1312   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1313                                        isolate())));
   1314   __ StoreP(r8, MemOperand(ip));
   1315 
   1316   // Invoke the function by calling through JS entry trampoline builtin.
   1317   // Notice that we cannot store a reference to the trampoline code directly in
   1318   // this stub, because runtime stubs are not traversed when doing GC.
   1319 
   1320   // Expected registers by Builtins::JSEntryTrampoline
   1321   // r3: code entry
   1322   // r4: function
   1323   // r5: receiver
   1324   // r6: argc
   1325   // r7: argv
   1326   if (type() == StackFrame::ENTRY_CONSTRUCT) {
   1327     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
   1328                                       isolate());
   1329     __ mov(ip, Operand(construct_entry));
   1330   } else {
   1331     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
   1332     __ mov(ip, Operand(entry));
   1333   }
   1334   __ LoadP(ip, MemOperand(ip));  // deref address
   1335 
   1336   // Branch and link to JSEntryTrampoline.
   1337   // the address points to the start of the code object, skip the header
   1338   __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   1339   __ mtctr(ip);
   1340   __ bctrl();  // make the call
   1341 
   1342   // Unlink this frame from the handler chain.
   1343   __ PopStackHandler();
   1344 
   1345   __ bind(&exit);  // r3 holds result
   1346   // Check if the current stack frame is marked as the outermost JS frame.
   1347   Label non_outermost_js_2;
   1348   __ pop(r8);
   1349   __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
   1350   __ bne(&non_outermost_js_2);
   1351   __ mov(r9, Operand::Zero());
   1352   __ mov(r8, Operand(ExternalReference(js_entry_sp)));
   1353   __ StoreP(r9, MemOperand(r8));
   1354   __ bind(&non_outermost_js_2);
   1355 
   1356   // Restore the top frame descriptors from the stack.
   1357   __ pop(r6);
   1358   __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   1359   __ StoreP(r6, MemOperand(ip));
   1360 
   1361   // Reset the stack to the callee saved registers.
   1362   __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
   1363 
   1364   // Restore callee-saved double registers.
   1365   __ MultiPopDoubles(kCalleeSavedDoubles);
   1366 
   1367   // Restore callee-saved registers.
   1368   __ MultiPop(kCalleeSaved);
   1369 
   1370   // Return
   1371   __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
   1372   __ mtlr(r0);
   1373   __ blr();
   1374 }
   1375 
   1376 
   1377 void InstanceOfStub::Generate(MacroAssembler* masm) {
   1378   Register const object = r4;              // Object (lhs).
   1379   Register const function = r3;            // Function (rhs).
   1380   Register const object_map = r5;          // Map of {object}.
   1381   Register const function_map = r6;        // Map of {function}.
   1382   Register const function_prototype = r7;  // Prototype of {function}.
   1383   Register const scratch = r8;
   1384 
   1385   DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
   1386   DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
   1387 
   1388   // Check if {object} is a smi.
   1389   Label object_is_smi;
   1390   __ JumpIfSmi(object, &object_is_smi);
   1391 
   1392   // Lookup the {function} and the {object} map in the global instanceof cache.
   1393   // Note: This is safe because we clear the global instanceof cache whenever
   1394   // we change the prototype of any object.
   1395   Label fast_case, slow_case;
   1396   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   1397   __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
   1398   __ bne(&fast_case);
   1399   __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
   1400   __ bne(&fast_case);
   1401   __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
   1402   __ Ret();
   1403 
   1404   // If {object} is a smi we can safely return false if {function} is a JS
   1405   // function, otherwise we have to miss to the runtime and throw an exception.
   1406   __ bind(&object_is_smi);
   1407   __ JumpIfSmi(function, &slow_case);
   1408   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
   1409   __ bne(&slow_case);
   1410   __ LoadRoot(r3, Heap::kFalseValueRootIndex);
   1411   __ Ret();
   1412 
   1413   // Fast-case: The {function} must be a valid JSFunction.
   1414   __ bind(&fast_case);
   1415   __ JumpIfSmi(function, &slow_case);
   1416   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
   1417   __ bne(&slow_case);
   1418 
   1419   // Ensure that {function} has an instance prototype.
   1420   __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
   1421   __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
   1422   __ bne(&slow_case, cr0);
   1423 
   1424   // Get the "prototype" (or initial map) of the {function}.
   1425   __ LoadP(function_prototype,
   1426            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   1427   __ AssertNotSmi(function_prototype);
   1428 
   1429   // Resolve the prototype if the {function} has an initial map.  Afterwards the
   1430   // {function_prototype} will be either the JSReceiver prototype object or the
   1431   // hole value, which means that no instances of the {function} were created so
   1432   // far and hence we should return false.
   1433   Label function_prototype_valid;
   1434   __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
   1435   __ bne(&function_prototype_valid);
   1436   __ LoadP(function_prototype,
   1437            FieldMemOperand(function_prototype, Map::kPrototypeOffset));
   1438   __ bind(&function_prototype_valid);
   1439   __ AssertNotSmi(function_prototype);
   1440 
   1441   // Update the global instanceof cache with the current {object} map and
   1442   // {function}.  The cached answer will be set when it is known below.
   1443   __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
   1444   __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
   1445 
   1446   // Loop through the prototype chain looking for the {function} prototype.
   1447   // Assume true, and change to false if not found.
   1448   Register const object_instance_type = function_map;
   1449   Register const map_bit_field = function_map;
   1450   Register const null = scratch;
   1451   Register const result = r3;
   1452 
   1453   Label done, loop, fast_runtime_fallback;
   1454   __ LoadRoot(result, Heap::kTrueValueRootIndex);
   1455   __ LoadRoot(null, Heap::kNullValueRootIndex);
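           // Each iteration walks one step up the prototype chain: it bails out to
           // the runtime for access-checked objects and proxies, returns true when
           // the current prototype equals {function_prototype}, and returns false
           // when the chain ends at null.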
   1456   __ bind(&loop);
   1457 
   1458   // Check if the object needs to be access checked.
   1459   __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
   1460   __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
   1461   __ bne(&fast_runtime_fallback, cr0);
   1462   // Check if the current object is a Proxy.
   1463   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
   1464   __ beq(&fast_runtime_fallback);
   1465 
   1466   __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
   1467   __ cmp(object, function_prototype);
   1468   __ beq(&done);
   1469   __ cmp(object, null);
   1470   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   1471   __ bne(&loop);
   1472   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   1473   __ bind(&done);
   1474   __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
   1475   __ Ret();
   1476 
   1477   // Found Proxy or access check needed: Call the runtime
   1478   __ bind(&fast_runtime_fallback);
   1479   __ Push(object, function_prototype);
   1480   // Invalidate the instanceof cache.
   1481   __ LoadSmiLiteral(scratch, Smi::FromInt(0));
   1482   __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
   1483   __ TailCallRuntime(Runtime::kHasInPrototypeChain);
   1484 
   1485   // Slow-case: Call the %InstanceOf runtime function.
   1486   __ bind(&slow_case);
   1487   __ Push(object, function);
   1488   __ TailCallRuntime(Runtime::kInstanceOf);
   1489 }
   1490 
   1491 
   1492 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   1493   Label miss;
   1494   Register receiver = LoadDescriptor::ReceiverRegister();
   1495   // Ensure that the vector and slot registers won't be clobbered before
   1496   // calling the miss handler.
   1497   DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(),
   1498                      LoadWithVectorDescriptor::SlotRegister()));
   1499 
   1500   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
   1501                                                           r8, &miss);
   1502   __ bind(&miss);
   1503   PropertyAccessCompiler::TailCallBuiltin(
   1504       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
   1505 }
   1506 
   1507 
   1508 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   1509   // Return address is in lr.
   1510   Label miss;
   1511 
   1512   Register receiver = LoadDescriptor::ReceiverRegister();
   1513   Register index = LoadDescriptor::NameRegister();
   1514   Register scratch = r8;
   1515   Register result = r3;
   1516   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   1517   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
   1518          result.is(LoadWithVectorDescriptor::SlotRegister()));
   1519 
   1520   // StringCharAtGenerator doesn't use the result register until it's passed
   1521   // the different miss possibilities. If it did, we would have a conflict
   1522   // when FLAG_vector_ics is true.
   1523   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
   1524                                           &miss,  // When not a string.
   1525                                           &miss,  // When not a number.
   1526                                           &miss,  // When index out of range.
   1527                                           STRING_INDEX_IS_ARRAY_INDEX,
   1528                                           RECEIVER_IS_STRING);
   1529   char_at_generator.GenerateFast(masm);
   1530   __ Ret();
   1531 
   1532   StubRuntimeCallHelper call_helper;
   1533   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
   1534 
   1535   __ bind(&miss);
   1536   PropertyAccessCompiler::TailCallBuiltin(
   1537       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
   1538 }
   1539 
   1540 
   1541 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   1542   // The displacement is the offset of the last parameter (if any)
   1543   // relative to the frame pointer.
   1544   const int kDisplacement =
   1545       StandardFrameConstants::kCallerSPOffset - kPointerSize;
   1546   DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
   1547   DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
   1548 
   1549   // Check that the key is a smi.
   1550   Label slow;
   1551   __ JumpIfNotSmi(r4, &slow);
   1552 
   1553   // Check if the calling frame is an arguments adaptor frame.
   1554   Label adaptor;
   1555   __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1556   __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
   1557   STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
   1558   __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   1559   __ beq(&adaptor);
   1560 
   1561   // Check index against formal parameters count limit passed in
   1562   // through register r3. Use unsigned comparison to get negative
   1563   // check for free.
   1564   __ cmpl(r4, r3);
   1565   __ bge(&slow);
   1566 
   1567   // Read the argument from the stack and return it.
   1568   __ sub(r6, r3, r4);
   1569   __ SmiToPtrArrayOffset(r6, r6);
   1570   __ add(r6, fp, r6);
   1571   __ LoadP(r3, MemOperand(r6, kDisplacement));
   1572   __ blr();
   1573 
   1574   // Arguments adaptor case: Check index against actual arguments
   1575   // limit found in the arguments adaptor frame. Use unsigned
   1576   // comparison to get negative check for free.
   1577   __ bind(&adaptor);
   1578   __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1579   __ cmpl(r4, r3);
   1580   __ bge(&slow);
   1581 
   1582   // Read the argument from the adaptor frame and return it.
   1583   __ sub(r6, r3, r4);
   1584   __ SmiToPtrArrayOffset(r6, r6);
   1585   __ add(r6, r5, r6);
   1586   __ LoadP(r3, MemOperand(r6, kDisplacement));
   1587   __ blr();
   1588 
   1589   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   1590   // by calling the runtime system.
   1591   __ bind(&slow);
   1592   __ push(r4);
   1593   __ TailCallRuntime(Runtime::kArguments);
   1594 }
   1595 
   1596 
   1597 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   1598   // r4 : function
   1599   // r5 : number of parameters (tagged)
   1600   // r6 : parameters pointer
   1601 
   1602   DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
   1603   DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
   1604   DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
   1605 
   1606   // Check if the calling frame is an arguments adaptor frame.
   1607   Label runtime;
   1608   __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1609   __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
   1610   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   1611   __ bne(&runtime);
   1612 
   1613   // Patch the arguments.length and the parameters pointer in the current frame.
   1614   __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1615   __ SmiToPtrArrayOffset(r6, r5);
   1616   __ add(r6, r6, r7);
   1617   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
   1618 
   1619   __ bind(&runtime);
   1620   __ Push(r4, r6, r5);
   1621   __ TailCallRuntime(Runtime::kNewSloppyArguments);
   1622 }
   1623 
   1624 
   1625 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   1626   // r4 : function
   1627   // r5 : number of parameters (tagged)
   1628   // r6 : parameters pointer
   1629   // Registers used over whole function:
   1630   // r8 : arguments count (tagged)
   1631   // r9 : mapped parameter count (tagged)
   1632 
   1633   DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
   1634   DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
   1635   DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
   1636 
   1637   // Check if the calling frame is an arguments adaptor frame.
   1638   Label adaptor_frame, try_allocate, runtime;
   1639   __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1640   __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
   1641   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   1642   __ beq(&adaptor_frame);
   1643 
   1644   // No adaptor, parameter count = argument count.
   1645   __ mr(r8, r5);
   1646   __ mr(r9, r5);
   1647   __ b(&try_allocate);
   1648 
   1649   // We have an adaptor frame. Patch the parameters pointer.
   1650   __ bind(&adaptor_frame);
   1651   __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1652   __ SmiToPtrArrayOffset(r6, r8);
   1653   __ add(r6, r6, r7);
   1654   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
   1655 
   1656   // r8 = argument count (tagged)
   1657   // r9 = parameter count (tagged)
   1658   // Compute the mapped parameter count = min(r5, r8) in r9.
   1659   __ cmp(r5, r8);
   1660   if (CpuFeatures::IsSupported(ISELECT)) {
   1661     __ isel(lt, r9, r5, r8);
   1662   } else {
   1663     Label skip;
   1664     __ mr(r9, r5);
   1665     __ blt(&skip);
   1666     __ mr(r9, r8);
   1667     __ bind(&skip);
   1668   }
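           // Note: with ISELECT, isel(lt, r9, r5, r8) picks r5 when the preceding
           // compare set the "lt" bit (r5 < r8) and r8 otherwise, so both paths
           // leave min(r5, r8) in r9 without a branch.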
   1669 
   1670   __ bind(&try_allocate);
   1671 
   1672   // Compute the sizes of backing store, parameter map, and arguments object.
    1673   // 1. Parameter map: has 2 extra words containing context and backing store.
   1674   const int kParameterMapHeaderSize =
   1675       FixedArray::kHeaderSize + 2 * kPointerSize;
   1676   // If there are no mapped parameters, we do not need the parameter_map.
   1677   __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
   1678   if (CpuFeatures::IsSupported(ISELECT)) {
   1679     __ SmiToPtrArrayOffset(r11, r9);
   1680     __ addi(r11, r11, Operand(kParameterMapHeaderSize));
   1681     __ isel(eq, r11, r0, r11);
   1682   } else {
   1683     Label skip2, skip3;
   1684     __ bne(&skip2);
   1685     __ li(r11, Operand::Zero());
   1686     __ b(&skip3);
   1687     __ bind(&skip2);
   1688     __ SmiToPtrArrayOffset(r11, r9);
   1689     __ addi(r11, r11, Operand(kParameterMapHeaderSize));
   1690     __ bind(&skip3);
   1691   }
   1692 
   1693   // 2. Backing store.
   1694   __ SmiToPtrArrayOffset(r7, r8);
   1695   __ add(r11, r11, r7);
   1696   __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
   1697 
   1698   // 3. Arguments object.
   1699   __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
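           // r11 now holds the total allocation size in bytes: the (possibly empty)
           // parameter map, the backing store FixedArray, and the sloppy arguments
           // JSObject itself.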
   1700 
   1701   // Do the allocation of all three objects in one go.
   1702   __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
   1703 
   1704   // r3 = address of new object(s) (tagged)
   1705   // r5 = argument count (smi-tagged)
   1706   // Get the arguments boilerplate from the current native context into r4.
   1707   const int kNormalOffset =
   1708       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   1709   const int kAliasedOffset =
   1710       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
   1711 
   1712   __ LoadP(r7, NativeContextMemOperand());
   1713   __ cmpi(r9, Operand::Zero());
   1714   if (CpuFeatures::IsSupported(ISELECT)) {
   1715     __ LoadP(r11, MemOperand(r7, kNormalOffset));
   1716     __ LoadP(r7, MemOperand(r7, kAliasedOffset));
   1717     __ isel(eq, r7, r11, r7);
   1718   } else {
   1719     Label skip4, skip5;
   1720     __ bne(&skip4);
   1721     __ LoadP(r7, MemOperand(r7, kNormalOffset));
   1722     __ b(&skip5);
   1723     __ bind(&skip4);
   1724     __ LoadP(r7, MemOperand(r7, kAliasedOffset));
   1725     __ bind(&skip5);
   1726   }
   1727 
   1728   // r3 = address of new object (tagged)
   1729   // r5 = argument count (smi-tagged)
   1730   // r7 = address of arguments map (tagged)
   1731   // r9 = mapped parameter count (tagged)
   1732   __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
   1733   __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
   1734   __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
   1735   __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
   1736 
   1737   // Set up the callee in-object property.
   1738   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   1739   __ AssertNotSmi(r4);
   1740   const int kCalleeOffset =
   1741       JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
   1742   __ StoreP(r4, FieldMemOperand(r3, kCalleeOffset), r0);
   1743 
   1744   // Use the length (smi tagged) and set that as an in-object property too.
   1745   __ AssertSmi(r8);
   1746   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   1747   const int kLengthOffset =
   1748       JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
   1749   __ StoreP(r8, FieldMemOperand(r3, kLengthOffset), r0);
   1750 
   1751   // Set up the elements pointer in the allocated arguments object.
   1752   // If we allocated a parameter map, r7 will point there, otherwise
   1753   // it will point to the backing store.
   1754   __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
   1755   __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
   1756 
   1757   // r3 = address of new object (tagged)
   1758   // r5 = argument count (tagged)
   1759   // r7 = address of parameter map or backing store (tagged)
   1760   // r9 = mapped parameter count (tagged)
   1761   // Initialize parameter map. If there are no mapped arguments, we're done.
   1762   Label skip_parameter_map;
   1763   __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
   1764   if (CpuFeatures::IsSupported(ISELECT)) {
   1765     __ isel(eq, r4, r7, r4);
   1766     __ beq(&skip_parameter_map);
   1767   } else {
   1768     Label skip6;
   1769     __ bne(&skip6);
   1770     // Move backing store address to r4, because it is
   1771     // expected there when filling in the unmapped arguments.
   1772     __ mr(r4, r7);
   1773     __ b(&skip_parameter_map);
   1774     __ bind(&skip6);
   1775   }
   1776 
   1777   __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
   1778   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
   1779   __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
   1780   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
   1781   __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
   1782             r0);
   1783   __ SmiToPtrArrayOffset(r8, r9);
   1784   __ add(r8, r8, r7);
   1785   __ addi(r8, r8, Operand(kParameterMapHeaderSize));
   1786   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
   1787             r0);
   1788 
   1789   // Copy the parameter slots and the holes in the arguments.
   1790   // We need to fill in mapped_parameter_count slots. They index the context,
   1791   // where parameters are stored in reverse order, at
   1792   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
    1793   // The mapped parameters thus need to get indices
   1794   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
   1795   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
   1796   // We loop from right to left.
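           // Illustrative example (values are not taken from this file): if
           // MIN_CONTEXT_SLOTS were 4, parameter_count 3 and mapped_parameter_count
           // 2, the two mapped parameters would receive context indices 6 and 5.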
   1797   Label parameters_loop;
   1798   __ mr(r8, r9);
   1799   __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
   1800   __ sub(r11, r11, r9);
   1801   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   1802   __ SmiToPtrArrayOffset(r4, r8);
   1803   __ add(r4, r4, r7);
   1804   __ addi(r4, r4, Operand(kParameterMapHeaderSize));
   1805 
   1806   // r4 = address of backing store (tagged)
   1807   // r7 = address of parameter map (tagged)
   1808   // r8 = temporary scratch (a.o., for address calculation)
   1809   // r10 = temporary scratch (a.o., for address calculation)
   1810   // ip = the hole value
   1811   __ SmiUntag(r8);
   1812   __ mtctr(r8);
   1813   __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
   1814   __ add(r10, r4, r8);
   1815   __ add(r8, r7, r8);
   1816   __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   1817   __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
   1818 
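           // Each iteration stores the current context index (r11, a smi) into the
           // next parameter map slot and the hole (ip) into the matching backing
           // store slot, walking both arrays from high addresses towards low ones.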
   1819   __ bind(&parameters_loop);
   1820   __ StorePU(r11, MemOperand(r8, -kPointerSize));
   1821   __ StorePU(ip, MemOperand(r10, -kPointerSize));
   1822   __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
   1823   __ bdnz(&parameters_loop);
   1824 
   1825   // Restore r8 = argument count (tagged).
   1826   __ LoadP(r8, FieldMemOperand(r3, kLengthOffset));
   1827 
   1828   __ bind(&skip_parameter_map);
   1829   // r3 = address of new object (tagged)
   1830   // r4 = address of backing store (tagged)
   1831   // r8 = argument count (tagged)
   1832   // r9 = mapped parameter count (tagged)
   1833   // r11 = scratch
   1834   // Copy arguments header and remaining slots (if there are any).
   1835   __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
   1836   __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
   1837   __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
   1838   __ sub(r11, r8, r9, LeaveOE, SetRC);
   1839   __ Ret(eq, cr0);
   1840 
   1841   Label arguments_loop;
   1842   __ SmiUntag(r11);
   1843   __ mtctr(r11);
   1844 
   1845   __ SmiToPtrArrayOffset(r0, r9);
   1846   __ sub(r6, r6, r0);
   1847   __ add(r11, r4, r0);
   1848   __ addi(r11, r11,
   1849           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
   1850 
   1851   __ bind(&arguments_loop);
   1852   __ LoadPU(r7, MemOperand(r6, -kPointerSize));
   1853   __ StorePU(r7, MemOperand(r11, kPointerSize));
   1854   __ bdnz(&arguments_loop);
   1855 
   1856   // Return.
   1857   __ Ret();
   1858 
   1859   // Do the runtime call to allocate the arguments object.
   1860   // r8 = argument count (tagged)
   1861   __ bind(&runtime);
   1862   __ Push(r4, r6, r8);
   1863   __ TailCallRuntime(Runtime::kNewSloppyArguments);
   1864 }
   1865 
   1866 
   1867 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   1868   // Return address is in lr.
   1869   Label slow;
   1870 
   1871   Register receiver = LoadDescriptor::ReceiverRegister();
   1872   Register key = LoadDescriptor::NameRegister();
   1873 
   1874   // Check that the key is an array index, that is Uint32.
   1875   __ TestIfPositiveSmi(key, r0);
   1876   __ bne(&slow, cr0);
   1877 
   1878   // Everything is fine, call runtime.
   1879   __ Push(receiver, key);  // Receiver, key.
   1880 
   1881   // Perform tail call to the entry.
   1882   __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
   1883 
   1884   __ bind(&slow);
   1885   PropertyAccessCompiler::TailCallBuiltin(
   1886       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
   1887 }
   1888 
   1889 
   1890 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   1891   // r4 : function
   1892   // r5 : number of parameters (tagged)
   1893   // r6 : parameters pointer
   1894 
   1895   DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
   1896   DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
   1897   DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
   1898 
   1899   // Check if the calling frame is an arguments adaptor frame.
   1900   Label try_allocate, runtime;
   1901   __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1902   __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
   1903   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   1904   __ bne(&try_allocate);
   1905 
   1906   // Patch the arguments.length and the parameters pointer.
   1907   __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1908   __ SmiToPtrArrayOffset(r6, r5);
   1909   __ add(r6, r6, r7);
   1910   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
   1911 
   1912   // Try the new space allocation. Start out with computing the size
   1913   // of the arguments object and the elements array in words.
   1914   Label add_arguments_object;
   1915   __ bind(&try_allocate);
   1916   __ SmiUntag(r11, r5, SetRC);
   1917   __ beq(&add_arguments_object, cr0);
   1918   __ addi(r11, r11, Operand(FixedArray::kHeaderSize / kPointerSize));
   1919   __ bind(&add_arguments_object);
   1920   __ addi(r11, r11, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
   1921 
   1922   // Do the allocation of both objects in one go.
   1923   __ Allocate(r11, r3, r7, r8, &runtime,
   1924               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
   1925 
   1926   // Get the arguments boilerplate from the current native context.
   1927   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
   1928 
   1929   __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
   1930   __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
   1931   __ StoreP(r8, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
   1932   __ StoreP(r8, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
   1933 
   1934   // Get the length (smi tagged) and set that as an in-object property too.
   1935   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   1936   __ AssertSmi(r5);
   1937   __ StoreP(r5,
   1938             FieldMemOperand(r3, JSObject::kHeaderSize +
   1939                                     Heap::kArgumentsLengthIndex * kPointerSize),
   1940             r0);
   1941 
   1942   // If there are no actual arguments, we're done.
   1943   __ SmiUntag(r9, r5, SetRC);
   1944   __ Ret(eq, cr0);
   1945 
   1946   // Set up the elements pointer in the allocated arguments object and
   1947   // initialize the header in the elements fixed array.
   1948   __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
   1949   __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
   1950   __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
   1951   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
   1952   __ StoreP(r5, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
   1953 
   1954   // Copy the fixed array slots.
   1955   Label loop;
   1956   // Set up r7 to point just prior to the first array slot.
   1957   __ addi(r7, r7,
   1958           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
   1959   __ mtctr(r9);
   1960   __ bind(&loop);
   1961   // Pre-decrement r6 with kPointerSize on each iteration.
   1962   // Pre-decrement in order to skip receiver.
   1963   __ LoadPU(r8, MemOperand(r6, -kPointerSize));
   1964   // Pre-increment r7 with kPointerSize on each iteration.
   1965   __ StorePU(r8, MemOperand(r7, kPointerSize));
   1966   __ bdnz(&loop);
   1967 
   1968   // Return.
   1969   __ Ret();
   1970 
   1971   // Do the runtime call to allocate the arguments object.
   1972   __ bind(&runtime);
   1973   __ Push(r4, r6, r5);
   1974   __ TailCallRuntime(Runtime::kNewStrictArguments);
   1975 }
   1976 
   1977 
   1978 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
   1979   // r5 : number of parameters (tagged)
   1980   // r6 : parameters pointer
   1981   // r7 : rest parameter index (tagged)
   1982 
   1983   Label runtime;
   1984   __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1985   __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
   1986   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   1987   __ bne(&runtime);
   1988 
   1989   // Patch the arguments.length and the parameters pointer.
   1990   __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
   1991   __ SmiToPtrArrayOffset(r0, r5);
   1992   __ add(r6, r8, r0);
   1993   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
   1994 
   1995   __ bind(&runtime);
   1996   __ Push(r5, r6, r7);
   1997   __ TailCallRuntime(Runtime::kNewRestParam);
   1998 }
   1999 
   2000 
   2001 void RegExpExecStub::Generate(MacroAssembler* masm) {
    2002 // Just jump directly to the runtime if native RegExp is not selected at
    2003 // compile time, or if the regexp entry in generated code is turned off by a
    2004 // runtime switch or at compilation.
   2005 #ifdef V8_INTERPRETED_REGEXP
   2006   __ TailCallRuntime(Runtime::kRegExpExec);
   2007 #else  // V8_INTERPRETED_REGEXP
   2008 
   2009   // Stack frame on entry.
   2010   //  sp[0]: last_match_info (expected JSArray)
   2011   //  sp[4]: previous index
   2012   //  sp[8]: subject string
   2013   //  sp[12]: JSRegExp object
   2014 
   2015   const int kLastMatchInfoOffset = 0 * kPointerSize;
   2016   const int kPreviousIndexOffset = 1 * kPointerSize;
   2017   const int kSubjectOffset = 2 * kPointerSize;
   2018   const int kJSRegExpOffset = 3 * kPointerSize;
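           // Note: the byte offsets in the comment above assume 32-bit pointers; the
           // kPointerSize-based constants here are correct for 64-bit builds as well.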
   2019 
   2020   Label runtime, br_over, encoding_type_UC16;
   2021 
    2022   // Allocation of registers for this function. These are in callee-saved
    2023   // registers and will be preserved by the call to the native RegExp code, as
    2024   // this code is called using the normal C calling convention. When calling
    2025   // directly from generated code the native RegExp code will not do a GC and
    2026   // therefore the contents of these registers are safe to use after the call.
   2027   Register subject = r14;
   2028   Register regexp_data = r15;
   2029   Register last_match_info_elements = r16;
   2030   Register code = r17;
   2031 
    2032   // Ensure register assignments are consistent with the callee-saved masks.
   2033   DCHECK(subject.bit() & kCalleeSaved);
   2034   DCHECK(regexp_data.bit() & kCalleeSaved);
   2035   DCHECK(last_match_info_elements.bit() & kCalleeSaved);
   2036   DCHECK(code.bit() & kCalleeSaved);
   2037 
   2038   // Ensure that a RegExp stack is allocated.
   2039   ExternalReference address_of_regexp_stack_memory_address =
   2040       ExternalReference::address_of_regexp_stack_memory_address(isolate());
   2041   ExternalReference address_of_regexp_stack_memory_size =
   2042       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   2043   __ mov(r3, Operand(address_of_regexp_stack_memory_size));
   2044   __ LoadP(r3, MemOperand(r3, 0));
   2045   __ cmpi(r3, Operand::Zero());
   2046   __ beq(&runtime);
   2047 
   2048   // Check that the first argument is a JSRegExp object.
   2049   __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
   2050   __ JumpIfSmi(r3, &runtime);
   2051   __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
   2052   __ bne(&runtime);
   2053 
   2054   // Check that the RegExp has been compiled (data contains a fixed array).
   2055   __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
   2056   if (FLAG_debug_code) {
   2057     __ TestIfSmi(regexp_data, r0);
   2058     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
   2059     __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
   2060     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
   2061   }
   2062 
   2063   // regexp_data: RegExp data (FixedArray)
   2064   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   2065   __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
   2066   // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
   2067   __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
   2068   __ bne(&runtime);
   2069 
   2070   // regexp_data: RegExp data (FixedArray)
    2071   // Check that the number of captures fits in the static offsets vector buffer.
   2072   __ LoadP(r5,
   2073            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   2074   // Check (number_of_captures + 1) * 2 <= offsets vector size
   2075   // Or          number_of_captures * 2 <= offsets vector size - 2
   2076   // SmiToShortArrayOffset accomplishes the multiplication by 2 and
   2077   // SmiUntag (which is a nop for 32-bit).
   2078   __ SmiToShortArrayOffset(r5, r5);
   2079   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   2080   __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
   2081   __ bgt(&runtime);
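           // Illustrative example (the actual vector size is defined in isolate.h):
           // with a static offsets vector of 32 entries, at most (32 - 2) / 2 = 15
           // captures can use the static buffer; larger regexps fall back to the
           // runtime.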
   2082 
   2083   // Reset offset for possibly sliced string.
   2084   __ li(r11, Operand::Zero());
   2085   __ LoadP(subject, MemOperand(sp, kSubjectOffset));
   2086   __ JumpIfSmi(subject, &runtime);
   2087   __ mr(r6, subject);  // Make a copy of the original subject string.
   2088   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
   2089   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
   2090   // subject: subject string
   2091   // r6: subject string
   2092   // r3: subject string instance type
   2093   // regexp_data: RegExp data (FixedArray)
   2094   // Handle subject string according to its encoding and representation:
   2095   // (1) Sequential string?  If yes, go to (5).
   2096   // (2) Anything but sequential or cons?  If yes, go to (6).
   2097   // (3) Cons string.  If the string is flat, replace subject with first string.
   2098   //     Otherwise bailout.
   2099   // (4) Is subject external?  If yes, go to (7).
   2100   // (5) Sequential string.  Load regexp code according to encoding.
   2101   // (E) Carry on.
   2102   /// [...]
   2103 
   2104   // Deferred code at the end of the stub:
   2105   // (6) Not a long external string?  If yes, go to (8).
   2106   // (7) External string.  Make it, offset-wise, look like a sequential string.
   2107   //     Go to (5).
   2108   // (8) Short external string or not a string?  If yes, bail out to runtime.
   2109   // (9) Sliced string.  Replace subject with parent.  Go to (4).
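           // For example, a flat cons string takes (1) -> (2) -> (3), is replaced by
           // its first component, and re-enters the checks at (4); a sliced string
           // reaches (9) via the deferred code and also re-enters at (4).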
   2110 
   2111   Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */,
   2112       not_seq_nor_cons /* 6 */, not_long_external /* 8 */;
   2113 
   2114   // (1) Sequential string?  If yes, go to (5).
   2115   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
   2116                  kShortExternalStringMask) == 0x93);
   2117   __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
   2118                           kShortExternalStringMask));
   2119   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   2120   __ beq(&seq_string, cr0);  // Go to (5).
   2121 
   2122   // (2) Anything but sequential or cons?  If yes, go to (6).
   2123   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   2124   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   2125   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   2126   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   2127   STATIC_ASSERT(kExternalStringTag < 0xffffu);
   2128   __ cmpi(r4, Operand(kExternalStringTag));
   2129   __ bge(&not_seq_nor_cons);  // Go to (6).
   2130 
   2131   // (3) Cons string.  Check that it's flat.
   2132   // Replace subject with first string and reload instance type.
   2133   __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
   2134   __ CompareRoot(r3, Heap::kempty_stringRootIndex);
   2135   __ bne(&runtime);
   2136   __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   2137 
   2138   // (4) Is subject external?  If yes, go to (7).
   2139   __ bind(&check_underlying);
   2140   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
   2141   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
   2142   STATIC_ASSERT(kSeqStringTag == 0);
   2143   STATIC_ASSERT(kStringRepresentationMask == 3);
   2144   __ andi(r0, r3, Operand(kStringRepresentationMask));
   2145   // The underlying external string is never a short external string.
   2146   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
   2147   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
   2148   __ bne(&external_string, cr0);  // Go to (7).
   2149 
   2150   // (5) Sequential string.  Load regexp code according to encoding.
   2151   __ bind(&seq_string);
   2152   // subject: sequential subject string (or look-alike, external string)
   2153   // r6: original subject string
   2154   // Load previous index and check range before r6 is overwritten.  We have to
   2155   // use r6 instead of subject here because subject might have been only made
   2156   // to look like a sequential string when it actually is an external string.
   2157   __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
   2158   __ JumpIfNotSmi(r4, &runtime);
   2159   __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
   2160   __ cmpl(r6, r4);
   2161   __ ble(&runtime);
   2162   __ SmiUntag(r4);
   2163 
   2164   STATIC_ASSERT(4 == kOneByteStringTag);
   2165   STATIC_ASSERT(kTwoByteStringTag == 0);
   2166   STATIC_ASSERT(kStringEncodingMask == 4);
   2167   __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
   2168   __ beq(&encoding_type_UC16, cr0);
   2169   __ LoadP(code,
   2170            FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
   2171   __ b(&br_over);
   2172   __ bind(&encoding_type_UC16);
   2173   __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   2174   __ bind(&br_over);
   2175 
   2176   // (E) Carry on.  String handling is done.
   2177   // code: irregexp code
   2178   // Check that the irregexp code has been generated for the actual string
    2179   // encoding. If it has, the field contains a code object; otherwise it contains
   2180   // a smi (code flushing support).
   2181   __ JumpIfSmi(code, &runtime);
   2182 
   2183   // r4: previous index
   2184   // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
   2185   // code: Address of generated regexp code
   2186   // subject: Subject string
   2187   // regexp_data: RegExp data (FixedArray)
   2188   // All checks done. Now push arguments for native regexp code.
   2189   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);
   2190 
   2191   // Isolates: note we add an additional parameter here (isolate pointer).
   2192   const int kRegExpExecuteArguments = 10;
   2193   const int kParameterRegisters = 8;
   2194   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
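           // 10 arguments with 8 parameter registers means two slots are reserved on
           // the stack: one for argument 10 (the isolate) and one dummy slot for
           // argument 9 (the return address area used by native calls).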
   2195 
   2196   // Stack pointer now points to cell where return address is to be written.
   2197   // Arguments are before that on the stack or in registers.
   2198 
   2199   // Argument 10 (in stack parameter area): Pass current isolate address.
   2200   __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
   2201   __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
   2202 
   2203   // Argument 9 is a dummy that reserves the space used for
   2204   // the return address added by the ExitFrame in native calls.
   2205 
   2206   // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
   2207   __ li(r10, Operand(1));
   2208 
   2209   // Argument 7 (r9): Start (high end) of backtracking stack memory area.
   2210   __ mov(r3, Operand(address_of_regexp_stack_memory_address));
   2211   __ LoadP(r3, MemOperand(r3, 0));
   2212   __ mov(r5, Operand(address_of_regexp_stack_memory_size));
   2213   __ LoadP(r5, MemOperand(r5, 0));
   2214   __ add(r9, r3, r5);
   2215 
   2216   // Argument 6 (r8): Set the number of capture registers to zero to force
    2217   // global regexps to behave as non-global.  This does not affect non-global
   2218   // regexps.
   2219   __ li(r8, Operand::Zero());
   2220 
   2221   // Argument 5 (r7): static offsets vector buffer.
   2222   __ mov(
   2223       r7,
   2224       Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
   2225 
   2226   // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
   2227   // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
   2228   __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   2229   __ xori(r6, r6, Operand(1));
   2230   // Load the length from the original subject string from the previous stack
   2231   // frame. Therefore we have to use fp, which points exactly to two pointer
   2232   // sizes below the previous sp. (Because creating a new stack frame pushes
   2233   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
   2234   __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   2235   // If slice offset is not 0, load the length from the original sliced string.
   2236   // Argument 4, r6: End of string data
   2237   // Argument 3, r5: Start of string data
   2238   // Prepare start and end index of the input.
   2239   __ ShiftLeft_(r11, r11, r6);
   2240   __ add(r11, r18, r11);
   2241   __ ShiftLeft_(r5, r4, r6);
   2242   __ add(r5, r11, r5);
   2243 
   2244   __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
   2245   __ SmiUntag(r18);
   2246   __ ShiftLeft_(r6, r18, r6);
   2247   __ add(r6, r11, r6);
   2248 
   2249   // Argument 2 (r4): Previous index.
   2250   // Already there
   2251 
   2252   // Argument 1 (r3): Subject string.
   2253   __ mr(r3, subject);
   2254 
   2255   // Locate the code entry and call it.
   2256   __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
   2257 
   2258 
   2259 #if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
   2260   // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
   2261   // RegExp routine.  Extract the instruction address here since
   2262   // DirectCEntryStub::GenerateCall will not do it for calls out to
   2263   // what it thinks is C code compiled for the simulator/host
   2264   // platform.
   2265   __ LoadP(code, MemOperand(code, 0));  // Instruction address
   2266 #endif
   2267 
   2268   DirectCEntryStub stub(isolate());
   2269   stub.GenerateCall(masm, code);
   2270 
   2271   __ LeaveExitFrame(false, no_reg, true);
   2272 
   2273   // r3: result (int32)
   2274   // subject: subject string (callee saved)
   2275   // regexp_data: RegExp data (callee saved)
   2276   // last_match_info_elements: Last match info elements (callee saved)
   2277   // Check the result.
   2278   Label success;
   2279   __ cmpwi(r3, Operand(1));
   2280   // We expect exactly one result since we force the called regexp to behave
   2281   // as non-global.
   2282   __ beq(&success);
   2283   Label failure;
   2284   __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
   2285   __ beq(&failure);
   2286   __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   2287   // If not exception it can only be retry. Handle that in the runtime system.
   2288   __ bne(&runtime);
    2289   // The result must now be exception. If there is no pending exception already,
    2290   // a stack overflow (on the backtrack stack) was detected in the RegExp code
    2291   // but the exception has not been created yet. Handle that in the runtime system.
   2292   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   2293   __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
   2294   __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   2295                                        isolate())));
   2296   __ LoadP(r3, MemOperand(r5, 0));
   2297   __ cmp(r3, r4);
   2298   __ beq(&runtime);
   2299 
   2300   // For exception, throw the exception again.
   2301   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
   2302 
   2303   __ bind(&failure);
   2304   // For failure and exception return null.
   2305   __ mov(r3, Operand(isolate()->factory()->null_value()));
   2306   __ addi(sp, sp, Operand(4 * kPointerSize));
   2307   __ Ret();
   2308 
   2309   // Process the result from the native regexp code.
   2310   __ bind(&success);
   2311   __ LoadP(r4,
   2312            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   2313   // Calculate number of capture registers (number_of_captures + 1) * 2.
   2314   // SmiToShortArrayOffset accomplishes the multiplication by 2 and
   2315   // SmiUntag (which is a nop for 32-bit).
   2316   __ SmiToShortArrayOffset(r4, r4);
   2317   __ addi(r4, r4, Operand(2));
   2318 
   2319   __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
   2320   __ JumpIfSmi(r3, &runtime);
   2321   __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE);
   2322   __ bne(&runtime);
   2323   // Check that the JSArray is in fast case.
   2324   __ LoadP(last_match_info_elements,
   2325            FieldMemOperand(r3, JSArray::kElementsOffset));
   2326   __ LoadP(r3,
   2327            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   2328   __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
   2329   __ bne(&runtime);
   2330   // Check that the last match info has space for the capture registers and the
   2331   // additional information.
   2332   __ LoadP(
   2333       r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   2334   __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
   2335   __ SmiUntag(r0, r3);
   2336   __ cmp(r5, r0);
   2337   __ bgt(&runtime);
   2338 
   2339   // r4: number of capture registers
   2340   // subject: subject string
   2341   // Store the capture count.
   2342   __ SmiTag(r5, r4);
   2343   __ StoreP(r5, FieldMemOperand(last_match_info_elements,
   2344                                 RegExpImpl::kLastCaptureCountOffset),
   2345             r0);
   2346   // Store last subject and last input.
   2347   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
   2348                                      RegExpImpl::kLastSubjectOffset),
   2349             r0);
   2350   __ mr(r5, subject);
   2351   __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
   2352                       subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
   2353   __ mr(subject, r5);
   2354   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
   2355                                      RegExpImpl::kLastInputOffset),
   2356             r0);
   2357   __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
   2358                       subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
   2359 
   2360   // Get the static offsets vector filled by the native regexp code.
   2361   ExternalReference address_of_static_offsets_vector =
   2362       ExternalReference::address_of_static_offsets_vector(isolate());
   2363   __ mov(r5, Operand(address_of_static_offsets_vector));
   2364 
   2365   // r4: number of capture registers
   2366   // r5: offsets vector
   2367   Label next_capture;
    2368   // The capture register counter starts from the number of capture registers
    2369   // and counts down until wrapping after zero.
   2370   __ addi(
   2371       r3, last_match_info_elements,
   2372       Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
   2373   __ addi(r5, r5, Operand(-kIntSize));  // bias down for lwzu
   2374   __ mtctr(r4);
   2375   __ bind(&next_capture);
   2376   // Read the value from the static offsets vector buffer.
   2377   __ lwzu(r6, MemOperand(r5, kIntSize));
   2378   // Store the smi value in the last match info.
   2379   __ SmiTag(r6);
   2380   __ StorePU(r6, MemOperand(r3, kPointerSize));
   2381   __ bdnz(&next_capture);
   2382 
   2383   // Return last match info.
   2384   __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
   2385   __ addi(sp, sp, Operand(4 * kPointerSize));
   2386   __ Ret();
   2387 
   2388   // Do the runtime call to execute the regexp.
   2389   __ bind(&runtime);
   2390   __ TailCallRuntime(Runtime::kRegExpExec);
   2391 
   2392   // Deferred code for string handling.
   2393   // (6) Not a long external string?  If yes, go to (8).
   2394   __ bind(&not_seq_nor_cons);
   2395   // Compare flags are still set.
   2396   __ bgt(&not_long_external);  // Go to (8).
   2397 
   2398   // (7) External string.  Make it, offset-wise, look like a sequential string.
   2399   __ bind(&external_string);
   2400   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
   2401   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
   2402   if (FLAG_debug_code) {
   2403     // Assert that we do not have a cons or slice (indirect strings) here.
   2404     // Sequential strings have already been ruled out.
   2405     STATIC_ASSERT(kIsIndirectStringMask == 1);
   2406     __ andi(r0, r3, Operand(kIsIndirectStringMask));
   2407     __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
   2408   }
   2409   __ LoadP(subject,
   2410            FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   2411   // Move the pointer so that offset-wise, it looks like a sequential string.
   2412   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   2413   __ subi(subject, subject,
   2414           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   2415   __ b(&seq_string);  // Go to (5).
   2416 
   2417   // (8) Short external string or not a string?  If yes, bail out to runtime.
   2418   __ bind(&not_long_external);
   2419   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   2420   __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
   2421   __ bne(&runtime, cr0);
   2422 
   2423   // (9) Sliced string.  Replace subject with parent.  Go to (4).
   2424   // Load offset into r11 and replace subject string with parent.
   2425   __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   2426   __ SmiUntag(r11);
   2427   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   2428   __ b(&check_underlying);  // Go to (4).
   2429 #endif  // V8_INTERPRETED_REGEXP
   2430 }
   2431 
   2432 
   2433 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
   2434   // r3 : number of arguments to the construct function
   2435   // r4 : the function to call
   2436   // r5 : feedback vector
   2437   // r6 : slot in feedback vector (Smi)
   2438   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2439 
   2440   // Number-of-arguments register must be smi-tagged to call out.
   2441   __ SmiTag(r3);
   2442   __ Push(r6, r5, r4, r3);
   2443 
   2444   __ CallStub(stub);
   2445 
   2446   __ Pop(r6, r5, r4, r3);
   2447   __ SmiUntag(r3);
   2448 }
   2449 
   2450 
   2451 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   2452   // Cache the called function in a feedback vector slot.  Cache states
   2453   // are uninitialized, monomorphic (indicated by a JSFunction), and
   2454   // megamorphic.
   2455   // r3 : number of arguments to the construct function
   2456   // r4 : the function to call
   2457   // r5 : feedback vector
   2458   // r6 : slot in feedback vector (Smi)
   2459   Label initialize, done, miss, megamorphic, not_array_function;
   2460 
   2461   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
   2462             masm->isolate()->heap()->megamorphic_symbol());
   2463   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
   2464             masm->isolate()->heap()->uninitialized_symbol());
   2465 
   2466   // Load the cache state into r8.
   2467   __ SmiToPtrArrayOffset(r8, r6);
   2468   __ add(r8, r5, r8);
   2469   __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
   2470 
   2471   // A monomorphic cache hit or an already megamorphic state: invoke the
   2472   // function without changing the state.
   2473   // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
   2474   // this position in a symbol (see static asserts in type-feedback-vector.h).
   2475   Label check_allocation_site;
   2476   Register feedback_map = r9;
   2477   Register weak_value = r10;
   2478   __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
   2479   __ cmp(r4, weak_value);
   2480   __ beq(&done);
   2481   __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
   2482   __ beq(&done);
   2483   __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
   2484   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
   2485   __ bne(&check_allocation_site);
   2486 
   2487   // If the weak cell is cleared, we have a new chance to become monomorphic.
   2488   __ JumpIfSmi(weak_value, &initialize);
   2489   __ b(&megamorphic);
   2490 
   2491   __ bind(&check_allocation_site);
   2492   // If we came here, we need to see if we are the array function.
    2493   // If we didn't have a matching function, and we didn't find the megamorphic
   2494   // sentinel, then we have in the slot either some other function or an
   2495   // AllocationSite.
   2496   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
   2497   __ bne(&miss);
   2498 
   2499   // Make sure the function is the Array() function
   2500   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   2501   __ cmp(r4, r8);
   2502   __ bne(&megamorphic);
   2503   __ b(&done);
   2504 
   2505   __ bind(&miss);
   2506 
    2507   // A monomorphic miss (i.e., the cache is not uninitialized) goes
   2508   // megamorphic.
   2509   __ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
   2510   __ beq(&initialize);
   2511   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   2512   // write-barrier is needed.
   2513   __ bind(&megamorphic);
   2514   __ SmiToPtrArrayOffset(r8, r6);
   2515   __ add(r8, r5, r8);
   2516   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
   2517   __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
   2518   __ jmp(&done);
   2519 
    2520   // An uninitialized cache is patched with the function.
   2521   __ bind(&initialize);
   2522 
   2523   // Make sure the function is the Array() function.
   2524   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   2525   __ cmp(r4, r8);
   2526   __ bne(&not_array_function);
   2527 
    2528   // The target function is the Array constructor.
    2529   // Create an AllocationSite if we don't already have one and store it in the
    2530   // slot.
   2531   CreateAllocationSiteStub create_stub(masm->isolate());
   2532   CallStubInRecordCallTarget(masm, &create_stub);
   2533   __ b(&done);
   2534 
   2535   __ bind(&not_array_function);
   2536 
   2537   CreateWeakCellStub weak_cell_stub(masm->isolate());
   2538   CallStubInRecordCallTarget(masm, &weak_cell_stub);
   2539   __ bind(&done);
   2540 }
   2541 
   2542 
   2543 void CallConstructStub::Generate(MacroAssembler* masm) {
   2544   // r3 : number of arguments
   2545   // r4 : the function to call
   2546   // r5 : feedback vector
   2547   // r6 : slot in feedback vector (Smi, for RecordCallTarget)
   2548 
   2549   Label non_function;
   2550   // Check that the function is not a smi.
   2551   __ JumpIfSmi(r4, &non_function);
   2552   // Check that the function is a JSFunction.
   2553   __ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
   2554   __ bne(&non_function);
   2555 
   2556   GenerateRecordCallTarget(masm);
   2557 
   2558   __ SmiToPtrArrayOffset(r8, r6);
   2559   __ add(r8, r5, r8);
   2560   // Put the AllocationSite from the feedback vector into r5, or undefined.
   2561   __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
   2562   __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
   2563   __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
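          // If the slot does not hold an AllocationSite, fall back to undefined in
          // r5 (branchless via isel when available, a short branch sequence otherwise).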
   2564   if (CpuFeatures::IsSupported(ISELECT)) {
   2565     __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
   2566     __ isel(eq, r5, r5, r8);
   2567   } else {
   2568     Label feedback_register_initialized;
   2569     __ beq(&feedback_register_initialized);
   2570     __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
   2571     __ bind(&feedback_register_initialized);
   2572   }
   2573 
   2574   __ AssertUndefinedOrAllocationSite(r5, r8);
   2575 
   2576   // Pass function as new target.
   2577   __ mr(r6, r4);
   2578 
   2579   // Tail call to the function-specific construct stub (still in the caller
   2580   // context at this point).
   2581   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   2582   __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
   2583   __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
   2584   __ JumpToJSEntry(ip);
   2585 
   2586   __ bind(&non_function);
   2587   __ mr(r6, r4);
   2588   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   2589 }
   2590 
   2591 
   2592 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   2593   // r4 - function
   2594   // r6 - slot id
   2595   // r5 - vector
   2596   // r7 - allocation site (loaded from vector[slot])
   2597   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   2598   __ cmp(r4, r8);
   2599   __ bne(miss);
   2600 
   2601   __ mov(r3, Operand(arg_count()));
   2602 
   2603   // Increment the call count for monomorphic function calls.
   2604   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
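          // The call count lives in the vector element immediately after the
          // feedback element for this slot, hence the extra kPointerSize.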
   2605   __ SmiToPtrArrayOffset(r8, r6);
   2606   __ add(r5, r5, r8);
   2607   __ LoadP(r6, FieldMemOperand(r5, count_offset));
   2608   __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
   2609   __ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
   2610 
   2611   __ mr(r5, r7);
   2612   __ mr(r6, r4);
   2613   ArrayConstructorStub stub(masm->isolate(), arg_count());
   2614   __ TailCallStub(&stub);
   2615 }
   2616 
   2617 
   2618 void CallICStub::Generate(MacroAssembler* masm) {
   2619   // r4 - function
   2620   // r6 - slot id (Smi)
   2621   // r5 - vector
   2622   Label extra_checks_or_miss, call, call_function;
   2623   int argc = arg_count();
   2624   ParameterCount actual(argc);
   2625 
   2626   // The checks. First, does r4 match the recorded monomorphic target?
   2627   __ SmiToPtrArrayOffset(r9, r6);
   2628   __ add(r9, r5, r9);
   2629   __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
   2630 
   2631   // We don't know that we have a weak cell. We might have a private symbol
   2632   // or an AllocationSite, but the memory is safe to examine.
   2633   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
   2634   // FixedArray.
   2635   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
   2636   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
   2637   // computed, meaning that it can't appear to be a pointer. If the low bit is
   2638   // 0, then hash is computed, but the 0 bit prevents the field from appearing
   2639   // to be a pointer.
   2640   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
   2641   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
   2642                     WeakCell::kValueOffset &&
   2643                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
   2644 
   2645   __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
   2646   __ cmp(r4, r8);
   2647   __ bne(&extra_checks_or_miss);
   2648 
   2649   // The compare above could have been a smi/smi comparison. Guard against it
   2650   // convincing us that we have a monomorphic JSFunction.
   2651   __ JumpIfSmi(r4, &extra_checks_or_miss);
   2652 
   2653   // Increment the call count for monomorphic function calls.
   2654   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
   2655   __ LoadP(r6, FieldMemOperand(r9, count_offset));
   2656   __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
   2657   __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
   2658 
   2659   __ bind(&call_function);
   2660   __ mov(r3, Operand(argc));
   2661   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
   2662           RelocInfo::CODE_TARGET);
   2663 
   2664   __ bind(&extra_checks_or_miss);
   2665   Label uninitialized, miss, not_allocation_site;
   2666 
   2667   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
   2668   __ beq(&call);
   2669 
   2670   // Verify that r7 contains an AllocationSite
   2671   __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
   2672   __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
   2673   __ bne(&not_allocation_site);
   2674 
   2675   // We have an allocation site.
   2676   HandleArrayCase(masm, &miss);
   2677 
   2678   __ bind(&not_allocation_site);
   2679 
   2680   // The following cases attempt to handle MISS cases without going to the
   2681   // runtime.
   2682   if (FLAG_trace_ic) {
   2683     __ b(&miss);
   2684   }
   2685 
   2686   __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
   2687   __ beq(&uninitialized);
   2688 
   2689   // We are going megamorphic. If the feedback is a JSFunction, it is fine
   2690   // to handle it here. More complex cases are dealt with in the runtime.
   2691   __ AssertNotSmi(r7);
   2692   __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
   2693   __ bne(&miss);
   2694   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
   2695   __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
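          // The slot is now megamorphic; fall through to the generic Call builtin.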
   2696 
   2697   __ bind(&call);
   2698   __ mov(r3, Operand(argc));
   2699   __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
   2700           RelocInfo::CODE_TARGET);
   2701 
   2702   __ bind(&uninitialized);
   2703 
   2704   // We are going monomorphic, provided we actually have a JSFunction.
   2705   __ JumpIfSmi(r4, &miss);
   2706 
   2707   // Goto miss case if we do not have a function.
   2708   __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
   2709   __ bne(&miss);
   2710 
   2711   // Make sure the function is not the Array() function, which requires special
   2712   // behavior on MISS.
   2713   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
   2714   __ cmp(r4, r7);
   2715   __ beq(&miss);
   2716 
   2717   // Make sure the function belongs to the same native context.
   2718   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
   2719   __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
   2720   __ LoadP(ip, NativeContextMemOperand());
   2721   __ cmp(r7, ip);
   2722   __ bne(&miss);
   2723 
   2724   // Initialize the call counter.
   2725   __ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
   2726   __ StoreP(r8, FieldMemOperand(r9, count_offset), r0);
   2727 
   2728   // Store the function. Use a stub since we need a frame for allocation.
   2729   // r5 - vector
   2730   // r6 - slot
   2731   // r4 - function
   2732   {
   2733     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2734     CreateWeakCellStub create_stub(masm->isolate());
   2735     __ Push(r4);
   2736     __ CallStub(&create_stub);
   2737     __ Pop(r4);
   2738   }
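          // r4 (the function) was preserved across the stub call above; the feedback
          // slot now holds a WeakCell for it, so future calls hit the monomorphic path.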
   2739 
   2740   __ b(&call_function);
   2741 
   2742   // We are here because tracing is on or we encountered a MISS case we can't
   2743   // handle here.
   2744   __ bind(&miss);
   2745   GenerateMiss(masm);
   2746 
   2747   __ b(&call);
   2748 }
   2749 
   2750 
   2751 void CallICStub::GenerateMiss(MacroAssembler* masm) {
   2752   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2753 
   2754   // Push the function and feedback info.
   2755   __ Push(r4, r5, r6);
   2756 
   2757   // Call the entry.
   2758   __ CallRuntime(Runtime::kCallIC_Miss);
   2759 
   2760   // Move result to r4 and exit the internal frame.
   2761   __ mr(r4, r3);
   2762 }
   2763 
   2764 
   2765 // StringCharCodeAtGenerator
   2766 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   2767   // If the receiver is a smi, trigger the non-string case.
   2768   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
   2769     __ JumpIfSmi(object_, receiver_not_string_);
   2770 
   2771     // Fetch the instance type of the receiver into result register.
   2772     __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2773     __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2774     // If the receiver is not a string, trigger the non-string case.
   2775     __ andi(r0, result_, Operand(kIsNotStringMask));
   2776     __ bne(receiver_not_string_, cr0);
   2777   }
   2778 
   2779   // If the index is not a smi, trigger the non-smi case.
   2780   __ JumpIfNotSmi(index_, &index_not_smi_);
   2781   __ bind(&got_smi_index_);
   2782 
   2783   // Check for index out of range.
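          // (The unsigned comparison below also rejects negative smi indices, which
          // compare as very large unsigned values.)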
   2784   __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
   2785   __ cmpl(ip, index_);
   2786   __ ble(index_out_of_range_);
   2787 
   2788   __ SmiUntag(index_);
   2789 
   2790   StringCharLoadGenerator::Generate(masm, object_, index_, result_,
   2791                                     &call_runtime_);
   2792 
   2793   __ SmiTag(result_);
   2794   __ bind(&exit_);
   2795 }
   2796 
   2797 
   2798 void StringCharCodeAtGenerator::GenerateSlow(
   2799     MacroAssembler* masm, EmbedMode embed_mode,
   2800     const RuntimeCallHelper& call_helper) {
   2801   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   2802 
   2803   // Index is not a smi.
   2804   __ bind(&index_not_smi_);
   2805   // If index is a heap number, try converting it to an integer.
   2806   __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
   2807               DONT_DO_SMI_CHECK);
   2808   call_helper.BeforeCall(masm);
   2809   if (embed_mode == PART_OF_IC_HANDLER) {
   2810     __ Push(LoadWithVectorDescriptor::VectorRegister(),
   2811             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   2812   } else {
   2813     // index_ is consumed by runtime conversion function.
   2814     __ Push(object_, index_);
   2815   }
   2816   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
   2817     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
   2818   } else {
   2819     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
   2820     // NumberToSmi discards numbers that are not exact integers.
   2821     __ CallRuntime(Runtime::kNumberToSmi);
   2822   }
   2823   // Save the conversion result before the pop instructions below
   2824   // have a chance to overwrite it.
   2825   __ Move(index_, r3);
   2826   if (embed_mode == PART_OF_IC_HANDLER) {
   2827     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
   2828            LoadWithVectorDescriptor::SlotRegister(), object_);
   2829   } else {
   2830     __ pop(object_);
   2831   }
   2832   // Reload the instance type.
   2833   __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   2834   __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   2835   call_helper.AfterCall(masm);
   2836   // If index is still not a smi, it must be out of range.
   2837   __ JumpIfNotSmi(index_, index_out_of_range_);
   2838   // Otherwise, return to the fast path.
   2839   __ b(&got_smi_index_);
   2840 
   2841   // Call runtime. We get here when the receiver is a string and the
   2842   // index is a number, but the code for getting the actual character
   2843   // is too complex (e.g., when the string needs to be flattened).
   2844   __ bind(&call_runtime_);
   2845   call_helper.BeforeCall(masm);
   2846   __ SmiTag(index_);
   2847   __ Push(object_, index_);
   2848   __ CallRuntime(Runtime::kStringCharCodeAtRT);
   2849   __ Move(result_, r3);
   2850   call_helper.AfterCall(masm);
   2851   __ b(&exit_);
   2852 
   2853   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   2854 }
   2855 
   2856 
   2857 // -------------------------------------------------------------------------
   2858 // StringCharFromCodeGenerator
   2859 
   2860 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   2861   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   2862   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
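          // The mask built below covers the smi tag bit and every payload bit outside
          // [0, kMaxOneByteCharCode], so a single test rejects non-smis as well as
          // char codes that are not one-byte.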
   2863   __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
   2864   __ ori(r0, r0, Operand(kSmiTagMask));
   2865   __ and_(r0, code_, r0, SetRC);
   2866   __ bne(&slow_case_, cr0);
   2867 
   2868   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   2869   // At this point the code register contains the smi-tagged one-byte char code.
   2870   __ mr(r0, code_);
   2871   __ SmiToPtrArrayOffset(code_, code_);
   2872   __ add(result_, result_, code_);
   2873   __ mr(code_, r0);
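          // code_ has been restored; the load below fetches the cached single-character
          // string, or undefined if it has not been cached yet.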
   2874   __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   2875   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
   2876   __ beq(&slow_case_);
   2877   __ bind(&exit_);
   2878 }
   2879 
   2880 
   2881 void StringCharFromCodeGenerator::GenerateSlow(
   2882     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   2883   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
   2884 
   2885   __ bind(&slow_case_);
   2886   call_helper.BeforeCall(masm);
   2887   __ push(code_);
   2888   __ CallRuntime(Runtime::kStringCharFromCode);
   2889   __ Move(result_, r3);
   2890   call_helper.AfterCall(masm);
   2891   __ b(&exit_);
   2892 
   2893   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
   2894 }
   2895 
   2896 
   2897 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
   2898 
   2899 
   2900 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
   2901                                           Register src, Register count,
   2902                                           Register scratch,
   2903                                           String::Encoding encoding) {
   2904   if (FLAG_debug_code) {
   2905     // Check that destination is word aligned.
   2906     __ andi(r0, dest, Operand(kPointerAlignmentMask));
   2907     __ Check(eq, kDestinationOfCopyNotAligned, cr0);
   2908   }
   2909 
   2910   // Nothing to do for zero characters.
   2911   Label done;
   2912   if (encoding == String::TWO_BYTE_ENCODING) {
   2913     // Double the length to get the byte count.
   2914     __ add(count, count, count, LeaveOE, SetRC);
   2915     __ beq(&done, cr0);
   2916   } else {
   2917     __ cmpi(count, Operand::Zero());
   2918     __ beq(&done);
   2919   }
   2920 
   2921   // Copy count bytes from src to dst.
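          // The loop is driven by the count register: mtctr loads it and bdnz
          // decrements it and branches while it is non-zero.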
   2922   Label byte_loop;
   2923   __ mtctr(count);
   2924   __ bind(&byte_loop);
   2925   __ lbz(scratch, MemOperand(src));
   2926   __ addi(src, src, Operand(1));
   2927   __ stb(scratch, MemOperand(dest));
   2928   __ addi(dest, dest, Operand(1));
   2929   __ bdnz(&byte_loop);
   2930 
   2931   __ bind(&done);
   2932 }
   2933 
   2934 
   2935 void SubStringStub::Generate(MacroAssembler* masm) {
   2936   Label runtime;
   2937 
   2938   // Stack frame on entry.
   2939   //  lr: return address
   2940   //  sp[0]: to
   2941   //  sp[4]: from
   2942   //  sp[8]: string
   2943 
   2944   // This stub is called from the native-call %_SubString(...), so
   2945   // nothing can be assumed about the arguments. It is tested that:
   2946   //  "string" is a sequential string,
   2947   //  both "from" and "to" are smis, and
   2948   //  0 <= from <= to <= string.length.
   2949   // If any of these assumptions fail, we call the runtime system.
   2950 
   2951   const int kToOffset = 0 * kPointerSize;
   2952   const int kFromOffset = 1 * kPointerSize;
   2953   const int kStringOffset = 2 * kPointerSize;
   2954 
   2955   __ LoadP(r5, MemOperand(sp, kToOffset));
   2956   __ LoadP(r6, MemOperand(sp, kFromOffset));
   2957 
   2958   // If either to or from is not a smi, bail out to the generic runtime.
   2959   __ JumpIfNotSmi(r5, &runtime);
   2960   __ JumpIfNotSmi(r6, &runtime);
   2961   __ SmiUntag(r5);
   2962   __ SmiUntag(r6, SetRC);
   2963   // Both r5 and r6 are untagged integers.
   2964 
   2965   // Bail out to the runtime here if from is negative.
   2966   __ blt(&runtime, cr0);  // From < 0.
   2967 
   2968   __ cmpl(r6, r5);
   2969   __ bgt(&runtime);  // Fail if from > to.
   2970   __ sub(r5, r5, r6);
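          // r5 now holds the substring length (to - from).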
   2971 
   2972   // Make sure first argument is a string.
   2973   __ LoadP(r3, MemOperand(sp, kStringOffset));
   2974   __ JumpIfSmi(r3, &runtime);
   2975   Condition is_string = masm->IsObjectStringType(r3, r4);
   2976   __ b(NegateCondition(is_string), &runtime, cr0);
   2977 
   2978   Label single_char;
   2979   __ cmpi(r5, Operand(1));
   2980   __ b(eq, &single_char);
   2981 
   2982   // Shortcut for the case of a trivial substring.
   2983   Label return_r3;
   2984   // r3: original string
   2985   // r5: result string length
   2986   __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset));
   2987   __ SmiUntag(r0, r7);
   2988   __ cmpl(r5, r0);
   2989   // Return original string.
   2990   __ beq(&return_r3);
   2991   // Longer than original string's length or negative: unsafe arguments.
   2992   __ bgt(&runtime);
   2993   // Shorter than original string's length: an actual substring.
   2994 
   2995   // Deal with different string types: update the index if necessary
   2996   // and put the underlying string into r8.
   2997   // r3: original string
   2998   // r4: instance type
   2999   // r5: length
   3000   // r6: from index (untagged)
   3001   Label underlying_unpacked, sliced_string, seq_or_external_string;
   3002   // If the string is not indirect, it can only be sequential or external.
   3003   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
   3004   STATIC_ASSERT(kIsIndirectStringMask != 0);
   3005   __ andi(r0, r4, Operand(kIsIndirectStringMask));
   3006   __ beq(&seq_or_external_string, cr0);
   3007 
   3008   __ andi(r0, r4, Operand(kSlicedNotConsMask));
   3009   __ bne(&sliced_string, cr0);
   3010   // Cons string.  Check whether it is flat, then fetch first part.
   3011   __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset));
   3012   __ CompareRoot(r8, Heap::kempty_stringRootIndex);
   3013   __ bne(&runtime);
   3014   __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset));
   3015   // Update instance type.
   3016   __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
   3017   __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   3018   __ b(&underlying_unpacked);
   3019 
   3020   __ bind(&sliced_string);
   3021   // Sliced string.  Fetch parent and correct start index by offset.
   3022   __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset));
   3023   __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset));
   3024   __ SmiUntag(r4, r7);
   3025   __ add(r6, r6, r4);  // Add offset to index.
   3026   // Update instance type.
   3027   __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
   3028   __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   3029   __ b(&underlying_unpacked);
   3030 
   3031   __ bind(&seq_or_external_string);
   3032   // Sequential or external string.  Just move string to the expected register.
   3033   __ mr(r8, r3);
   3034 
   3035   __ bind(&underlying_unpacked);
   3036 
   3037   if (FLAG_string_slices) {
   3038     Label copy_routine;
   3039     // r8: underlying subject string
   3040     // r4: instance type of underlying subject string
   3041     // r5: length
   3042     // r6: adjusted start index (untagged)
   3043     __ cmpi(r5, Operand(SlicedString::kMinLength));
   3044     // Short slice.  Copy instead of slicing.
   3045     __ blt(&copy_routine);
   3046     // Allocate new sliced string.  At this point we do not reload the instance
   3047     // type including the string encoding because we simply rely on the info
   3048     // provided by the original string.  It does not matter if the original
   3049     // string's encoding is wrong because we always have to recheck encoding of
   3050     // the newly created string's parent anyway due to externalized strings.
   3051     Label two_byte_slice, set_slice_header;
   3052     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   3053     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   3054     __ andi(r0, r4, Operand(kStringEncodingMask));
   3055     __ beq(&two_byte_slice, cr0);
   3056     __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime);
   3057     __ b(&set_slice_header);
   3058     __ bind(&two_byte_slice);
   3059     __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime);
   3060     __ bind(&set_slice_header);
   3061     __ SmiTag(r6);
   3062     __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0);
   3063     __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0);
   3064     __ b(&return_r3);
   3065 
   3066     __ bind(&copy_routine);
   3067   }
   3068 
   3069   // r8: underlying subject string
   3070   // r4: instance type of underlying subject string
   3071   // r5: length
   3072   // r6: adjusted start index (untagged)
   3073   Label two_byte_sequential, sequential_string, allocate_result;
   3074   STATIC_ASSERT(kExternalStringTag != 0);
   3075   STATIC_ASSERT(kSeqStringTag == 0);
   3076   __ andi(r0, r4, Operand(kExternalStringTag));
   3077   __ beq(&sequential_string, cr0);
   3078 
   3079   // Handle external string.
   3080   // Rule out short external strings.
   3081   STATIC_ASSERT(kShortExternalStringTag != 0);
   3082   __ andi(r0, r4, Operand(kShortExternalStringTag));
   3083   __ bne(&runtime, cr0);
   3084   __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset));
   3085   // r8 already points to the first character of the underlying string.
   3086   __ b(&allocate_result);
   3087 
   3088   __ bind(&sequential_string);
   3089   // Locate first character of underlying subject string.
   3090   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   3091   __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   3092 
   3093   __ bind(&allocate_result);
   3094   // Allocate the result; the encoding is determined from the instance type below.
   3095   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   3096   __ andi(r0, r4, Operand(kStringEncodingMask));
   3097   __ beq(&two_byte_sequential, cr0);
   3098 
   3099   // Allocate and copy the resulting one-byte string.
   3100   __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime);
   3101 
   3102   // Locate first character of substring to copy.
   3103   __ add(r8, r8, r6);
   3104   // Locate first character of result.
   3105   __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   3106 
   3107   // r3: result string
   3108   // r4: first character of result string
   3109   // r5: result string length
   3110   // r8: first character of substring to copy
   3111   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   3112   StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
   3113                                        String::ONE_BYTE_ENCODING);
   3114   __ b(&return_r3);
   3115 
   3116   // Allocate and copy the resulting two-byte string.
   3117   __ bind(&two_byte_sequential);
   3118   __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime);
   3119 
   3120   // Locate first character of substring to copy.
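          // (The index is scaled by 2 below because each character occupies two bytes.)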
   3121   __ ShiftLeftImm(r4, r6, Operand(1));
   3122   __ add(r8, r8, r4);
   3123   // Locate first character of result.
   3124   __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   3125 
   3126   // r3: result string.
   3127   // r4: first character of result.
   3128   // r5: result length.
   3129   // r8: first character of substring to copy.
   3130   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   3131   StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
   3132                                        String::TWO_BYTE_ENCODING);
   3133 
   3134   __ bind(&return_r3);
   3135   Counters* counters = isolate()->counters();
   3136   __ IncrementCounter(counters->sub_string_native(), 1, r6, r7);
   3137   __ Drop(3);
   3138   __ Ret();
   3139 
   3140   // Just jump to runtime to create the sub string.
   3141   __ bind(&runtime);
   3142   __ TailCallRuntime(Runtime::kSubString);
   3143 
   3144   __ bind(&single_char);
   3145   // r3: original string
   3146   // r4: instance type
   3147   // r5: length
   3148   // r6: from index (untagged)
   3149   __ SmiTag(r6, r6);
   3150   StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
   3151                                   STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
   3152   generator.GenerateFast(masm);
   3153   __ Drop(3);
   3154   __ Ret();
   3155   generator.SkipSlow(masm, &runtime);
   3156 }
   3157 
   3158 
   3159 void ToNumberStub::Generate(MacroAssembler* masm) {
   3160   // The ToNumber stub takes one argument in r3.
   3161   Label not_smi;
   3162   __ JumpIfNotSmi(r3, &not_smi);
   3163   __ blr();
   3164   __ bind(&not_smi);
   3165 
   3166   __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
   3167   // r3: receiver
   3168   // r4: receiver instance type
   3169   __ Ret(eq);
   3170 
   3171   Label not_string, slow_string;
   3172   __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
   3173   __ bge(&not_string);
   3174   // Check if string has a cached array index.
   3175   __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
   3176   __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
   3177   __ bne(&slow_string, cr0);
   3178   __ IndexFromHash(r5, r3);
   3179   __ blr();
   3180   __ bind(&slow_string);
   3181   __ push(r3);  // Push argument.
   3182   __ TailCallRuntime(Runtime::kStringToNumber);
   3183   __ bind(&not_string);
   3184 
   3185   Label not_oddball;
   3186   __ cmpi(r4, Operand(ODDBALL_TYPE));
   3187   __ bne(&not_oddball);
   3188   __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
   3189   __ blr();
   3190   __ bind(&not_oddball);
   3191 
   3192   __ push(r3);  // Push argument.
   3193   __ TailCallRuntime(Runtime::kToNumber);
   3194 }
   3195 
   3196 
   3197 void ToLengthStub::Generate(MacroAssembler* masm) {
   3198   // The ToLength stub takes one argument in r3.
   3199   Label not_smi;
   3200   __ JumpIfNotSmi(r3, &not_smi);
   3201   STATIC_ASSERT(kSmiTag == 0);
   3202   __ cmpi(r3, Operand::Zero());
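          // Clamp negative smis to zero; non-negative values pass through unchanged.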
   3203   if (CpuFeatures::IsSupported(ISELECT)) {
   3204     __ isel(lt, r3, r0, r3);
   3205   } else {
   3206     Label positive;
   3207     __ bgt(&positive);
   3208     __ li(r3, Operand::Zero());
   3209     __ bind(&positive);
   3210   }
   3211   __ Ret();
   3212   __ bind(&not_smi);
   3213 
   3214   __ push(r3);  // Push argument.
   3215   __ TailCallRuntime(Runtime::kToLength);
   3216 }
   3217 
   3218 
   3219 void ToStringStub::Generate(MacroAssembler* masm) {
   3220   // The ToString stub takes one argument in r3.
   3221   Label is_number;
   3222   __ JumpIfSmi(r3, &is_number);
   3223 
   3224   __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
   3225   // r3: receiver
   3226   // r4: receiver instance type
   3227   __ Ret(lt);
   3228 
   3229   Label not_heap_number;
   3230   __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
   3231   __ bne(&not_heap_number);
   3232   __ bind(&is_number);
   3233   NumberToStringStub stub(isolate());
   3234   __ TailCallStub(&stub);
   3235   __ bind(&not_heap_number);
   3236 
   3237   Label not_oddball;
   3238   __ cmpi(r4, Operand(ODDBALL_TYPE));
   3239   __ bne(&not_oddball);
   3240   __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
   3241   __ Ret();
   3242   __ bind(&not_oddball);
   3243 
   3244   __ push(r3);  // Push argument.
   3245   __ TailCallRuntime(Runtime::kToString);
   3246 }
   3247 
   3248 
   3249 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
   3250                                                    Register left,
   3251                                                    Register right,
   3252                                                    Register scratch1,
   3253                                                    Register scratch2) {
   3254   Register length = scratch1;
   3255 
   3256   // Compare lengths.
   3257   Label strings_not_equal, check_zero_length;
   3258   __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
   3259   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
   3260   __ cmp(length, scratch2);
   3261   __ beq(&check_zero_length);
   3262   __ bind(&strings_not_equal);
   3263   __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
   3264   __ Ret();
   3265 
   3266   // Check if the length is zero.
   3267   Label compare_chars;
   3268   __ bind(&check_zero_length);
   3269   STATIC_ASSERT(kSmiTag == 0);
   3270   __ cmpi(length, Operand::Zero());
   3271   __ bne(&compare_chars);
   3272   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
   3273   __ Ret();
   3274 
   3275   // Compare characters.
   3276   __ bind(&compare_chars);
   3277   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
   3278                                   &strings_not_equal);
   3279 
   3280   // Characters are equal.
   3281   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
   3282   __ Ret();
   3283 }
   3284 
   3285 
   3286 void StringHelper::GenerateCompareFlatOneByteStrings(
   3287     MacroAssembler* masm, Register left, Register right, Register scratch1,
   3288     Register scratch2, Register scratch3) {
   3289   Label result_not_equal, compare_lengths;
   3290   // Find minimum length and length difference.
   3291   __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
   3292   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
   3293   __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
   3294   Register length_delta = scratch3;
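          // Select the smaller of the two lengths into scratch1 (branchless with
          // isel, a compare-and-branch sequence otherwise).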
   3295   if (CpuFeatures::IsSupported(ISELECT)) {
   3296     __ isel(gt, scratch1, scratch2, scratch1, cr0);
   3297   } else {
   3298     Label skip;
   3299     __ ble(&skip, cr0);
   3300     __ mr(scratch1, scratch2);
   3301     __ bind(&skip);
   3302   }
   3303   Register min_length = scratch1;
   3304   STATIC_ASSERT(kSmiTag == 0);
   3305   __ cmpi(min_length, Operand::Zero());
   3306   __ beq(&compare_lengths);
   3307 
   3308   // Compare loop.
   3309   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
   3310                                   &result_not_equal);
   3311 
   3312   // Compare lengths - strings up to min-length are equal.
   3313   __ bind(&compare_lengths);
   3314   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   3315   // Use length_delta as result if it's zero.
   3316   __ mr(r3, length_delta);
   3317   __ cmpi(r3, Operand::Zero());
   3318   __ bind(&result_not_equal);
   3319   // Conditionally update the result based either on length_delta or
   3320   // the last comparison performed in the loop above.
   3321   if (CpuFeatures::IsSupported(ISELECT)) {
   3322     __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
   3323     __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
   3324     __ isel(eq, r3, r0, r4);
   3325     __ isel(lt, r3, r5, r3);
   3326     __ Ret();
   3327   } else {
   3328     Label less_equal, equal;
   3329     __ ble(&less_equal);
   3330     __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
   3331     __ Ret();
   3332     __ bind(&less_equal);
   3333     __ beq(&equal);
   3334     __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
   3335     __ bind(&equal);
   3336     __ Ret();
   3337   }
   3338 }
   3339 
   3340 
   3341 void StringHelper::GenerateOneByteCharsCompareLoop(
   3342     MacroAssembler* masm, Register left, Register right, Register length,
   3343     Register scratch1, Label* chars_not_equal) {
   3344   // Change index to run from -length to -1 by adding length to string
   3345   // start. This means that loop ends when index reaches zero, which
   3346   // doesn't need an additional compare.
   3347   __ SmiUntag(length);
   3348   __ addi(scratch1, length,
   3349           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   3350   __ add(left, left, scratch1);
   3351   __ add(right, right, scratch1);
   3352   __ subfic(length, length, Operand::Zero());
   3353   Register index = length;  // index = -length;
   3354 
   3355   // Compare loop.
   3356   Label loop;
   3357   __ bind(&loop);
   3358   __ lbzx(scratch1, MemOperand(left, index));
   3359   __ lbzx(r0, MemOperand(right, index));
   3360   __ cmp(scratch1, r0);
   3361   __ bne(chars_not_equal);
   3362   __ addi(index, index, Operand(1));
   3363   __ cmpi(index, Operand::Zero());
   3364   __ bne(&loop);
   3365 }
   3366 
   3367 
   3368 void StringCompareStub::Generate(MacroAssembler* masm) {
   3369   // ----------- S t a t e -------------
   3370   //  -- r4    : left
   3371   //  -- r3    : right
   3372   //  -- lr    : return address
   3373   // -----------------------------------
   3374   __ AssertString(r4);
   3375   __ AssertString(r3);
   3376 
   3377   Label not_same;
   3378   __ cmp(r3, r4);
   3379   __ bne(&not_same);
   3380   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
   3381   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
   3382                       r5);
   3383   __ Ret();
   3384 
   3385   __ bind(&not_same);
   3386 
   3387   // Check that both objects are sequential one-byte strings.
   3388   Label runtime;
   3389   __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
   3390 
   3391   // Compare flat one-byte strings natively.
   3392   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
   3393                       r6);
   3394   StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
   3395 
   3396   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   3397   // tagged as a small integer.
   3398   __ bind(&runtime);
   3399   __ Push(r4, r3);
   3400   __ TailCallRuntime(Runtime::kStringCompare);
   3401 }
   3402 
   3403 
   3404 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   3405   // ----------- S t a t e -------------
   3406   //  -- r4    : left
   3407   //  -- r3    : right
   3408   //  -- lr    : return address
   3409   // -----------------------------------
   3410 
   3411   // Load r5 with the allocation site.  We stick an undefined dummy value here
   3412   // and replace it with the real allocation site later when we instantiate this
   3413   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   3414   __ Move(r5, handle(isolate()->heap()->undefined_value()));
   3415 
   3416   // Make sure that we actually patched the allocation site.
   3417   if (FLAG_debug_code) {
   3418     __ TestIfSmi(r5, r0);
   3419     __ Assert(ne, kExpectedAllocationSite, cr0);
   3420     __ push(r5);
   3421     __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
   3422     __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
   3423     __ cmp(r5, ip);
   3424     __ pop(r5);
   3425     __ Assert(eq, kExpectedAllocationSite);
   3426   }
   3427 
   3428   // Tail call into the stub that handles binary operations with allocation
   3429   // sites.
   3430   BinaryOpWithAllocationSiteStub stub(isolate(), state());
   3431   __ TailCallStub(&stub);
   3432 }
   3433 
   3434 
   3435 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   3436   DCHECK_EQ(CompareICState::BOOLEAN, state());
   3437   Label miss;
   3438 
   3439   __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   3440   __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   3441   if (op() != Token::EQ_STRICT && is_strong(strength())) {
   3442     __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   3443   } else {
   3444     if (!Token::IsEqualityOp(op())) {
   3445       __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
   3446       __ AssertSmi(r4);
   3447       __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
   3448       __ AssertSmi(r3);
   3449     }
   3450     __ sub(r3, r4, r3);
   3451     __ Ret();
   3452   }
   3453 
   3454   __ bind(&miss);
   3455   GenerateMiss(masm);
   3456 }
   3457 
   3458 
   3459 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   3460   DCHECK(state() == CompareICState::SMI);
   3461   Label miss;
   3462   __ orx(r5, r4, r3);
   3463   __ JumpIfNotSmi(r5, &miss);
   3464 
   3465   if (GetCondition() == eq) {
   3466     // For equality we do not care about the sign of the result.
   3467     // __ sub(r3, r3, r4, SetCC);
   3468     __ sub(r3, r3, r4);
   3469   } else {
   3470     // Untag before subtracting to avoid handling overflow.
   3471     __ SmiUntag(r4);
   3472     __ SmiUntag(r3);
   3473     __ sub(r3, r4, r3);
   3474   }
   3475   __ Ret();
   3476 
   3477   __ bind(&miss);
   3478   GenerateMiss(masm);
   3479 }
   3480 
   3481 
   3482 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   3483   DCHECK(state() == CompareICState::NUMBER);
   3484 
   3485   Label generic_stub;
   3486   Label unordered, maybe_undefined1, maybe_undefined2;
   3487   Label miss;
   3488   Label equal, less_than;
   3489 
   3490   if (left() == CompareICState::SMI) {
   3491     __ JumpIfNotSmi(r4, &miss);
   3492   }
   3493   if (right() == CompareICState::SMI) {
   3494     __ JumpIfNotSmi(r3, &miss);
   3495   }
   3496 
   3497   // Inlining the double comparison and falling back to the general compare
   3498   // stub if NaN is involved.
   3499   // Load left and right operand.
   3500   Label done, left, left_smi, right_smi;
   3501   __ JumpIfSmi(r3, &right_smi);
   3502   __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
   3503               DONT_DO_SMI_CHECK);
   3504   __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
   3505   __ b(&left);
   3506   __ bind(&right_smi);
   3507   __ SmiToDouble(d1, r3);
   3508 
   3509   __ bind(&left);
   3510   __ JumpIfSmi(r4, &left_smi);
   3511   __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
   3512               DONT_DO_SMI_CHECK);
   3513   __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
   3514   __ b(&done);
   3515   __ bind(&left_smi);
   3516   __ SmiToDouble(d0, r4);
   3517 
   3518   __ bind(&done);
   3519 
   3520   // Compare operands
   3521   __ fcmpu(d0, d1);
   3522 
   3523   // Don't base result on status bits when a NaN is involved.
   3524   __ bunordered(&unordered);
   3525 
   3526   // Return a result of -1, 0, or 1, based on status bits.
   3527   if (CpuFeatures::IsSupported(ISELECT)) {
   3528     DCHECK(EQUAL == 0);
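            // Note: passing r0 as an isel source selects the literal value 0
            // (Power ISA), which is why EQUAL must be 0 here.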
   3529     __ li(r4, Operand(GREATER));
   3530     __ li(r5, Operand(LESS));
   3531     __ isel(eq, r3, r0, r4);
   3532     __ isel(lt, r3, r5, r3);
   3533     __ Ret();
   3534   } else {
   3535     __ beq(&equal);
   3536     __ blt(&less_than);
   3537     // Assume greater than.
   3538     __ li(r3, Operand(GREATER));
   3539     __ Ret();
   3540     __ bind(&equal);
   3541     __ li(r3, Operand(EQUAL));
   3542     __ Ret();
   3543     __ bind(&less_than);
   3544     __ li(r3, Operand(LESS));
   3545     __ Ret();
   3546   }
   3547 
   3548   __ bind(&unordered);
   3549   __ bind(&generic_stub);
   3550   CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
   3551                      CompareICState::GENERIC, CompareICState::GENERIC);
   3552   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   3553 
   3554   __ bind(&maybe_undefined1);
   3555   if (Token::IsOrderedRelationalCompareOp(op())) {
   3556     __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
   3557     __ bne(&miss);
   3558     __ JumpIfSmi(r4, &unordered);
   3559     __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
   3560     __ bne(&maybe_undefined2);
   3561     __ b(&unordered);
   3562   }
   3563 
   3564   __ bind(&maybe_undefined2);
   3565   if (Token::IsOrderedRelationalCompareOp(op())) {
   3566     __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
   3567     __ beq(&unordered);
   3568   }
   3569 
   3570   __ bind(&miss);
   3571   GenerateMiss(masm);
   3572 }
   3573 
   3574 
   3575 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   3576   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   3577   Label miss, not_equal;
   3578 
   3579   // Registers containing left and right operands respectively.
   3580   Register left = r4;
   3581   Register right = r3;
   3582   Register tmp1 = r5;
   3583   Register tmp2 = r6;
   3584 
   3585   // Check that both operands are heap objects.
   3586   __ JumpIfEitherSmi(left, right, &miss);
   3587 
   3588   // Check that both operands are internalized strings.
   3589   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   3590   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   3591   __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   3592   __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   3593   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   3594   __ orx(tmp1, tmp1, tmp2);
   3595   __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   3596   __ bne(&miss, cr0);
   3597 
   3598   // Internalized strings are compared by identity.
   3599   __ cmp(left, right);
   3600   __ bne(&not_equal);
   3601   // Make sure r3 is non-zero. At this point input operands are
   3602   // guaranteed to be non-zero.
   3603   DCHECK(right.is(r3));
   3604   STATIC_ASSERT(EQUAL == 0);
   3605   STATIC_ASSERT(kSmiTag == 0);
   3606   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
   3607   __ bind(&not_equal);
   3608   __ Ret();
   3609 
   3610   __ bind(&miss);
   3611   GenerateMiss(masm);
   3612 }
   3613 
   3614 
   3615 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   3616   DCHECK(state() == CompareICState::UNIQUE_NAME);
   3617   DCHECK(GetCondition() == eq);
   3618   Label miss;
   3619 
   3620   // Registers containing left and right operands respectively.
   3621   Register left = r4;
   3622   Register right = r3;
   3623   Register tmp1 = r5;
   3624   Register tmp2 = r6;
   3625 
   3626   // Check that both operands are heap objects.
   3627   __ JumpIfEitherSmi(left, right, &miss);
   3628 
   3629   // Check that both operands are unique names. This leaves the instance
   3630   // types loaded in tmp1 and tmp2.
   3631   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   3632   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   3633   __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   3634   __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   3635 
   3636   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
   3637   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
   3638 
   3639   // Unique names are compared by identity.
   3640   __ cmp(left, right);
   3641   __ bne(&miss);
   3642   // Make sure r3 is non-zero. At this point input operands are
   3643   // guaranteed to be non-zero.
   3644   DCHECK(right.is(r3));
   3645   STATIC_ASSERT(EQUAL == 0);
   3646   STATIC_ASSERT(kSmiTag == 0);
   3647   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
   3648   __ Ret();
   3649 
   3650   __ bind(&miss);
   3651   GenerateMiss(masm);
   3652 }
   3653 
   3654 
   3655 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
   3656   DCHECK(state() == CompareICState::STRING);
   3657   Label miss, not_identical, is_symbol;
   3658 
   3659   bool equality = Token::IsEqualityOp(op());
   3660 
   3661   // Registers containing left and right operands respectively.
   3662   Register left = r4;
   3663   Register right = r3;
   3664   Register tmp1 = r5;
   3665   Register tmp2 = r6;
   3666   Register tmp3 = r7;
   3667   Register tmp4 = r8;
   3668 
   3669   // Check that both operands are heap objects.
   3670   __ JumpIfEitherSmi(left, right, &miss);
   3671 
   3672   // Check that both operands are strings. This leaves the instance
   3673   // types loaded in tmp1 and tmp2.
   3674   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   3675   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   3676   __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   3677   __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   3678   STATIC_ASSERT(kNotStringTag != 0);
   3679   __ orx(tmp3, tmp1, tmp2);
   3680   __ andi(r0, tmp3, Operand(kIsNotStringMask));
   3681   __ bne(&miss, cr0);
   3682 
   3683   // Fast check for identical strings.
   3684   __ cmp(left, right);
   3685   STATIC_ASSERT(EQUAL == 0);
   3686   STATIC_ASSERT(kSmiTag == 0);
   3687   __ bne(&not_identical);
   3688   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
   3689   __ Ret();
   3690   __ bind(&not_identical);
   3691 
   3692   // Handle not identical strings.
   3693 
   3694   // Check that both strings are internalized strings. If they are, we're done
   3695   // because we already know they are not identical. We know they are both
   3696   // strings.
   3697   if (equality) {
   3698     DCHECK(GetCondition() == eq);
   3699     STATIC_ASSERT(kInternalizedTag == 0);
   3700     __ orx(tmp3, tmp1, tmp2);
   3701     __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
   3702     // Make sure r3 is non-zero. At this point input operands are
   3703     // guaranteed to be non-zero.
   3704     DCHECK(right.is(r3));
   3705     __ Ret(eq, cr0);
   3706   }
   3707 
   3708   // Check that both strings are sequential one-byte.
   3709   Label runtime;
   3710   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
   3711                                                     &runtime);
   3712 
   3713   // Compare flat one-byte strings. Returns when done.
   3714   if (equality) {
   3715     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
   3716                                                   tmp2);
   3717   } else {
   3718     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
   3719                                                     tmp2, tmp3);
   3720   }
   3721 
   3722   // Handle more complex cases in runtime.
   3723   __ bind(&runtime);
   3724   __ Push(left, right);
   3725   if (equality) {
   3726     __ TailCallRuntime(Runtime::kStringEquals);
   3727   } else {
   3728     __ TailCallRuntime(Runtime::kStringCompare);
   3729   }
   3730 
   3731   __ bind(&miss);
   3732   GenerateMiss(masm);
   3733 }
   3734 
   3735 
   3736 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
   3737   DCHECK_EQ(CompareICState::RECEIVER, state());
   3738   Label miss;
   3739   __ and_(r5, r4, r3);
   3740   __ JumpIfSmi(r5, &miss);
   3741 
   3742   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   3743   __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
   3744   __ blt(&miss);
   3745   __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
   3746   __ blt(&miss);
   3747 
   3748   DCHECK(GetCondition() == eq);
   3749   __ sub(r3, r3, r4);
   3750   __ Ret();
   3751 
   3752   __ bind(&miss);
   3753   GenerateMiss(masm);
   3754 }
   3755 
   3756 
   3757 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   3758   Label miss;
   3759   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   3760   __ and_(r5, r4, r3);
   3761   __ JumpIfSmi(r5, &miss);
   3762   __ GetWeakValue(r7, cell);
   3763   __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
   3764   __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
   3765   __ cmp(r5, r7);
   3766   __ bne(&miss);
   3767   __ cmp(r6, r7);
   3768   __ bne(&miss);
   3769 
   3770   if (Token::IsEqualityOp(op())) {
   3771     __ sub(r3, r3, r4);
   3772     __ Ret();
   3773   } else if (is_strong(strength())) {
   3774     __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   3775   } else {
   3776     if (op() == Token::LT || op() == Token::LTE) {
   3777       __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
   3778     } else {
   3779       __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
   3780     }
   3781     __ Push(r4, r3, r5);
   3782     __ TailCallRuntime(Runtime::kCompare);
   3783   }
   3784 
   3785   __ bind(&miss);
   3786   GenerateMiss(masm);
   3787 }
   3788 
   3789 
   3790 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   3791   {
   3792     // Call the runtime system in a fresh internal frame.
   3793     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   3794     __ Push(r4, r3);
   3795     __ Push(r4, r3);
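            // The operands are pushed twice: the first copy is restored below, the
            // second copy (plus the op smi) forms the arguments to the runtime call.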
   3796     __ LoadSmiLiteral(r0, Smi::FromInt(op()));
   3797     __ push(r0);
   3798     __ CallRuntime(Runtime::kCompareIC_Miss);
   3799     // Compute the entry point of the rewritten stub.
   3800     __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   3801     // Restore registers.
   3802     __ Pop(r4, r3);
   3803   }
   3804 
   3805   __ JumpToJSEntry(r5);
   3806 }
   3807 
   3808 
   3809 // This stub is paired with DirectCEntryStub::GenerateCall
   3810 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   3811   // Place the return address on the stack, making the call
   3812   // GC safe. The RegExp backend also relies on this.
   3813   __ mflr(r0);
   3814   __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
   3815   __ Call(ip);  // Call the C++ function.
   3816   __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
   3817   __ mtlr(r0);
   3818   __ blr();
   3819 }
   3820 
   3821 
   3822 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
   3823 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
   3824   // Native AIX/PPC64 Linux use a function descriptor.
   3825   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
   3826   __ LoadP(ip, MemOperand(target, 0));  // Instruction address
   3827 #else
   3828   // ip needs to be set for DirectCEntryStub::Generate, and also
   3829   // for ABI_CALL_VIA_IP.
   3830   __ Move(ip, target);
   3831 #endif
   3832 
   3833   intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
   3834   __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
   3835   __ Call(r0);  // Call the stub.
   3836 }
   3837 
   3838 
   3839 void NameDictionaryLookupStub::GenerateNegativeLookup(
   3840     MacroAssembler* masm, Label* miss, Label* done, Register receiver,
   3841     Register properties, Handle<Name> name, Register scratch0) {
   3842   DCHECK(name->IsUniqueName());
   3843   // If the names in slots 1 to kProbes - 1 for the hash value are all
   3844   // different from the name, and the kProbes-th slot is unused (its name is
   3845   // the undefined value), then the hash table is guaranteed not to contain
   3846   // the property. This holds even if some slots represent deleted properties
   3847   // (their names are the hole value).
   3848   for (int i = 0; i < kInlinedProbes; i++) {
   3849     // scratch0 points to properties hash.
   3850     // Compute the masked index: (hash + i + i * i) & mask.
   3851     Register index = scratch0;
   3852     // Capacity is smi 2^n.
   3853     __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
   3854     __ subi(index, index, Operand(1));
   3855     __ LoadSmiLiteral(
   3856         ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
   3857     __ and_(index, index, ip);
   3858 
   3859     // Scale the index by multiplying by the entry size.
   3860     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3861     __ ShiftLeftImm(ip, index, Operand(1));
   3862     __ add(index, index, ip);  // index *= 3.
   3863 
   3864     Register entity_name = scratch0;
   3865     // An undefined value at this position means the name is not in the table.
   3866     Register tmp = properties;
   3867     __ SmiToPtrArrayOffset(ip, index);
   3868     __ add(tmp, properties, ip);
   3869     __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   3870 
   3871     DCHECK(!tmp.is(entity_name));
   3872     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
   3873     __ cmp(entity_name, tmp);
   3874     __ beq(done);
   3875 
   3876     // Load the hole ready for use below:
   3877     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
   3878 
   3879     // Stop if found the property.
   3880     __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
   3881     __ beq(miss);
   3882 
   3883     Label good;
   3884     __ cmp(entity_name, tmp);
   3885     __ beq(&good);
   3886 
   3887     // Check if the entry name is not a unique name.
   3888     __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   3889     __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   3890     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
   3891     __ bind(&good);
   3892 
   3893     // Restore the properties.
   3894     __ LoadP(properties,
   3895              FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   3896   }
   3897 
   3898   const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
   3899                           r5.bit() | r4.bit() | r3.bit());
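          // Fall back to the full (out-of-line) lookup: save lr (via r0) and r3-r9
          // around the stub call below.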
   3900 
   3901   __ mflr(r0);
   3902   __ MultiPush(spill_mask);
   3903 
   3904   __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   3905   __ mov(r4, Operand(Handle<Name>(name)));
   3906   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   3907   __ CallStub(&stub);
   3908   __ cmpi(r3, Operand::Zero());
   3909 
   3910   __ MultiPop(spill_mask);  // MultiPop does not touch condition flags
   3911   __ mtlr(r0);
   3912 
   3913   __ beq(done);
   3914   __ bne(miss);
   3915 }
   3916 
   3917 
   3918 // Probe the name dictionary in the |elements| register. Jump to the
   3919 // |done| label if a property with the given name is found. Jump to
   3920 // the |miss| label otherwise.
   3921 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
   3922 void NameDictionaryLookupStub::GeneratePositiveLookup(
   3923     MacroAssembler* masm, Label* miss, Label* done, Register elements,
   3924     Register name, Register scratch1, Register scratch2) {
   3925   DCHECK(!elements.is(scratch1));
   3926   DCHECK(!elements.is(scratch2));
   3927   DCHECK(!name.is(scratch1));
   3928   DCHECK(!name.is(scratch2));
   3929 
   3930   __ AssertName(name);
   3931 
   3932   // Compute the capacity mask.
   3933   __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
   3934   __ SmiUntag(scratch1);  // convert smi to int
   3935   __ subi(scratch1, scratch1, Operand(1));
   3936 
   3937   // Generate an unrolled loop that performs a few probes before
   3938   // giving up. Measurements done on Gmail indicate that 2 probes
   3939   // cover ~93% of loads from dictionaries.
   3940   for (int i = 0; i < kInlinedProbes; i++) {
   3941     // Compute the masked index: (hash + i + i * i) & mask.
   3942     __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
   3943     if (i > 0) {
    3944       // Add the probe offset (i + i * i) shifted left by kHashShift so
    3945       // that it lines up with the hash bits in the hash field.  The sum
    3946       // is shifted back down by the srwi below before being masked.
   3947       DCHECK(NameDictionary::GetProbeOffset(i) <
   3948              1 << (32 - Name::kHashFieldOffset));
   3949       __ addi(scratch2, scratch2,
   3950               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   3951     }
   3952     __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
   3953     __ and_(scratch2, scratch1, scratch2);
   3954 
   3955     // Scale the index by multiplying by the entry size.
   3956     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   3957     // scratch2 = scratch2 * 3.
   3958     __ ShiftLeftImm(ip, scratch2, Operand(1));
   3959     __ add(scratch2, scratch2, ip);
   3960 
   3961     // Check if the key is identical to the name.
   3962     __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
   3963     __ add(scratch2, elements, ip);
   3964     __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
   3965     __ cmp(name, ip);
   3966     __ beq(done);
   3967   }
   3968 
   3969   const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
   3970                           r5.bit() | r4.bit() | r3.bit()) &
   3971                          ~(scratch1.bit() | scratch2.bit());
   3972 
   3973   __ mflr(r0);
   3974   __ MultiPush(spill_mask);
   3975   if (name.is(r3)) {
   3976     DCHECK(!elements.is(r4));
   3977     __ mr(r4, name);
   3978     __ mr(r3, elements);
   3979   } else {
   3980     __ mr(r3, elements);
   3981     __ mr(r4, name);
   3982   }
   3983   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
   3984   __ CallStub(&stub);
   3985   __ cmpi(r3, Operand::Zero());
   3986   __ mr(scratch2, r5);
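           // On success the stub leaves dictionary + scaled entry offset in r5
           // (its |index| register); copy it into scratch2 before MultiPop
           // restores the spilled registers.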
   3987   __ MultiPop(spill_mask);
   3988   __ mtlr(r0);
   3989 
   3990   __ bne(done);
   3991   __ beq(miss);
   3992 }
   3993 
   3994 
   3995 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   3996   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   3997   // we cannot call anything that could cause a GC from this stub.
    3998   // Registers:
    3999   //  dictionary (r3): NameDictionary to probe.
    4000   //  key (r4): the name being looked up.
    4001   //  index (r5): on a successful lookup, holds |dictionary| plus the
    4002   //              scaled offset of the matching entry.
    4003   //  result (r3): aliases |dictionary|.
    4004   // Returns:
    4005   //  result is zero if lookup failed, non-zero otherwise.
   4006 
   4007   Register result = r3;
   4008   Register dictionary = r3;
   4009   Register key = r4;
   4010   Register index = r5;
   4011   Register mask = r6;
   4012   Register hash = r7;
   4013   Register undefined = r8;
   4014   Register entry_key = r9;
   4015   Register scratch = r9;
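           // |entry_key| and |scratch| intentionally share r9: scratch is only
           // needed to form the entry address and is dead by the time the key
           // is loaded into entry_key.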
   4016 
   4017   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   4018 
   4019   __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
   4020   __ SmiUntag(mask);
   4021   __ subi(mask, mask, Operand(1));
   4022 
   4023   __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
   4024 
   4025   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   4026 
   4027   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   4028     // Compute the masked index: (hash + i + i * i) & mask.
    4029     // Capacity is a smi and is always a power of two (2^n).
   4030     if (i > 0) {
    4031       // Add the probe offset (i + i * i) shifted left by kHashShift so
    4032       // that it lines up with the hash bits in the hash field.  The sum
    4033       // is shifted back down by the srwi below before being masked.
   4034       DCHECK(NameDictionary::GetProbeOffset(i) <
   4035              1 << (32 - Name::kHashFieldOffset));
   4036       __ addi(index, hash,
   4037               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   4038     } else {
   4039       __ mr(index, hash);
   4040     }
   4041     __ srwi(r0, index, Operand(Name::kHashShift));
   4042     __ and_(index, mask, r0);
   4043 
   4044     // Scale the index by multiplying by the entry size.
   4045     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   4046     __ ShiftLeftImm(scratch, index, Operand(1));
   4047     __ add(index, index, scratch);  // index *= 3.
   4048 
   4049     __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
   4050     __ add(index, dictionary, scratch);
   4051     __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
   4052 
   4053     // Having undefined at this place means the name is not contained.
   4054     __ cmp(entry_key, undefined);
   4055     __ beq(&not_in_dictionary);
   4056 
   4057     // Stop if found the property.
   4058     __ cmp(entry_key, key);
   4059     __ beq(&in_dictionary);
   4060 
   4061     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
   4062       // Check if the entry name is not a unique name.
   4063       __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   4064       __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   4065       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
   4066     }
   4067   }
   4068 
   4069   __ bind(&maybe_in_dictionary);
   4070   // If we are doing negative lookup then probing failure should be
   4071   // treated as a lookup success. For positive lookup probing failure
   4072   // should be treated as lookup failure.
   4073   if (mode() == POSITIVE_LOOKUP) {
   4074     __ li(result, Operand::Zero());
   4075     __ Ret();
   4076   }
   4077 
   4078   __ bind(&in_dictionary);
   4079   __ li(result, Operand(1));
   4080   __ Ret();
   4081 
   4082   __ bind(&not_in_dictionary);
   4083   __ li(result, Operand::Zero());
   4084   __ Ret();
   4085 }
   4086 
   4087 
   4088 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
   4089     Isolate* isolate) {
   4090   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
   4091   stub1.GetCode();
   4092   // Hydrogen code stubs need stub2 at snapshot time.
   4093   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
   4094   stub2.GetCode();
   4095 }
   4096 
   4097 
    4098 // Takes the input in 3 registers: address_, value_ and object_.  A pointer to
    4099 // the value has just been written into the object; now this stub makes sure
   4100 // we keep the GC informed.  The word in the object where the value has been
   4101 // written is in the address register.
   4102 void RecordWriteStub::Generate(MacroAssembler* masm) {
   4103   Label skip_to_incremental_noncompacting;
   4104   Label skip_to_incremental_compacting;
   4105 
   4106   // The first two branch instructions are generated with labels so as to
   4107   // get the offset fixed up correctly by the bind(Label*) call.  We patch
    4108   // them back and forth between branching on true and branching on false
   4109   // when we start and stop incremental heap marking.
   4110   // See RecordWriteStub::Patch for details.
   4111 
   4112   // Clear the bit, branch on True for NOP action initially
   4113   __ crclr(Assembler::encode_crbit(cr2, CR_LT));
   4114   __ blt(&skip_to_incremental_noncompacting, cr2);
   4115   __ blt(&skip_to_incremental_compacting, cr2);
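           // With the cr2 bit cleared neither branch above is taken, so we fall
           // through to the store-buffer-only path below.  Activating
           // incremental marking patches one of the branches to branch-on-true.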
   4116 
   4117   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   4118     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   4119                            MacroAssembler::kReturnAtEnd);
   4120   }
   4121   __ Ret();
   4122 
   4123   __ bind(&skip_to_incremental_noncompacting);
   4124   GenerateIncremental(masm, INCREMENTAL);
   4125 
   4126   __ bind(&skip_to_incremental_compacting);
   4127   GenerateIncremental(masm, INCREMENTAL_COMPACTION);
   4128 
   4129   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
   4130   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
   4131   // patching not required on PPC as the initial path is effectively NOP
   4132 }
   4133 
   4134 
   4135 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   4136   regs_.Save(masm);
   4137 
   4138   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
   4139     Label dont_need_remembered_set;
   4140 
   4141     __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
   4142     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
   4143                            regs_.scratch0(), &dont_need_remembered_set);
   4144 
   4145     __ CheckPageFlag(regs_.object(), regs_.scratch0(),
   4146                      1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
   4147                      &dont_need_remembered_set);
   4148 
   4149     // First notify the incremental marker if necessary, then update the
   4150     // remembered set.
   4151     CheckNeedsToInformIncrementalMarker(
   4152         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
   4153     InformIncrementalMarker(masm);
   4154     regs_.Restore(masm);
   4155     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   4156                            MacroAssembler::kReturnAtEnd);
   4157 
   4158     __ bind(&dont_need_remembered_set);
   4159   }
   4160 
   4161   CheckNeedsToInformIncrementalMarker(
   4162       masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
   4163   InformIncrementalMarker(masm);
   4164   regs_.Restore(masm);
   4165   __ Ret();
   4166 }
   4167 
   4168 
   4169 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
   4170   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   4171   int argument_count = 3;
   4172   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   4173   Register address =
   4174       r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
   4175   DCHECK(!address.is(regs_.object()));
   4176   DCHECK(!address.is(r3));
   4177   __ mr(address, regs_.address());
   4178   __ mr(r3, regs_.object());
   4179   __ mr(r4, address);
   4180   __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
   4181 
   4182   AllowExternalCallThatCantCauseGC scope(masm);
   4183   __ CallCFunction(
   4184       ExternalReference::incremental_marking_record_write_function(isolate()),
   4185       argument_count);
   4186   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
   4187 }
   4188 
   4189 
   4190 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   4191     MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
   4192     Mode mode) {
   4193   Label on_black;
   4194   Label need_incremental;
   4195   Label need_incremental_pop_scratch;
   4196 
   4197   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
   4198   __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
   4199   __ and_(regs_.scratch0(), regs_.object(), r0);
   4200   __ LoadP(
   4201       regs_.scratch1(),
   4202       MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
   4203   __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1));
   4204   __ StoreP(
   4205       regs_.scratch1(),
   4206       MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
   4207   __ cmpi(regs_.scratch1(), Operand::Zero());  // PPC, we could do better here
   4208   __ blt(&need_incremental);
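           // Every run of this stub decrements the page's write barrier counter
           // (above); once it goes negative we inform the incremental marker
           // regardless of the object's color.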
   4209 
   4210   // Let's look at the color of the object:  If it is not black we don't have
   4211   // to inform the incremental marker.
   4212   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
   4213 
   4214   regs_.Restore(masm);
   4215   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   4216     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   4217                            MacroAssembler::kReturnAtEnd);
   4218   } else {
   4219     __ Ret();
   4220   }
   4221 
   4222   __ bind(&on_black);
   4223 
   4224   // Get the value from the slot.
   4225   __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
   4226 
   4227   if (mode == INCREMENTAL_COMPACTION) {
   4228     Label ensure_not_white;
   4229 
   4230     __ CheckPageFlag(regs_.scratch0(),  // Contains value.
   4231                      regs_.scratch1(),  // Scratch.
   4232                      MemoryChunk::kEvacuationCandidateMask, eq,
   4233                      &ensure_not_white);
   4234 
   4235     __ CheckPageFlag(regs_.object(),
   4236                      regs_.scratch1(),  // Scratch.
   4237                      MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
   4238                      &need_incremental);
   4239 
   4240     __ bind(&ensure_not_white);
   4241   }
   4242 
   4243   // We need extra registers for this, so we push the object and the address
   4244   // register temporarily.
   4245   __ Push(regs_.object(), regs_.address());
   4246   __ JumpIfWhite(regs_.scratch0(),  // The value.
   4247                  regs_.scratch1(),  // Scratch.
   4248                  regs_.object(),    // Scratch.
   4249                  regs_.address(),   // Scratch.
   4250                  &need_incremental_pop_scratch);
   4251   __ Pop(regs_.object(), regs_.address());
   4252 
   4253   regs_.Restore(masm);
   4254   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
   4255     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
   4256                            MacroAssembler::kReturnAtEnd);
   4257   } else {
   4258     __ Ret();
   4259   }
   4260 
   4261   __ bind(&need_incremental_pop_scratch);
   4262   __ Pop(regs_.object(), regs_.address());
   4263 
   4264   __ bind(&need_incremental);
   4265 
   4266   // Fall through when we need to inform the incremental marker.
   4267 }
   4268 
   4269 
   4270 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   4271   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   4272   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   4273   int parameter_count_offset =
   4274       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   4275   __ LoadP(r4, MemOperand(fp, parameter_count_offset));
   4276   if (function_mode() == JS_FUNCTION_STUB_MODE) {
   4277     __ addi(r4, r4, Operand(1));
   4278   }
   4279   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   4280   __ slwi(r4, r4, Operand(kPointerSizeLog2));
   4281   __ add(sp, sp, r4);
   4282   __ Ret();
   4283 }
   4284 
   4285 
   4286 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   4287   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   4288   LoadICStub stub(isolate(), state());
   4289   stub.GenerateForTrampoline(masm);
   4290 }
   4291 
   4292 
   4293 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   4294   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
   4295   KeyedLoadICStub stub(isolate(), state());
   4296   stub.GenerateForTrampoline(masm);
   4297 }
   4298 
   4299 
   4300 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   4301   __ EmitLoadTypeFeedbackVector(r5);
   4302   CallICStub stub(isolate(), state());
   4303   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   4304 }
   4305 
   4306 
   4307 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
   4308 
   4309 
   4310 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4311   GenerateImpl(masm, true);
   4312 }
   4313 
   4314 
   4315 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
   4316                              Register receiver_map, Register scratch1,
   4317                              Register scratch2, bool is_polymorphic,
   4318                              Label* miss) {
   4319   // feedback initially contains the feedback array
   4320   Label next_loop, prepare_next;
   4321   Label start_polymorphic;
   4322 
   4323   Register cached_map = scratch1;
   4324 
   4325   __ LoadP(cached_map,
   4326            FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
   4327   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   4328   __ cmp(receiver_map, cached_map);
   4329   __ bne(&start_polymorphic);
   4330   // found, now call handler.
   4331   Register handler = feedback;
   4332   __ LoadP(handler,
   4333            FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
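           // Handlers are Code objects; Code::kHeaderSize - kHeapObjectTag turns
           // the tagged pointer into the address of the first instruction.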
   4334   __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   4335   __ Jump(ip);
   4336 
   4337 
   4338   Register length = scratch2;
   4339   __ bind(&start_polymorphic);
   4340   __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   4341   if (!is_polymorphic) {
   4342     // If the IC could be monomorphic we have to make sure we don't go past the
   4343     // end of the feedback array.
   4344     __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
   4345     __ beq(miss);
   4346   }
   4347 
   4348   Register too_far = length;
   4349   Register pointer_reg = feedback;
   4350 
   4351   // +-----+------+------+-----+-----+ ... ----+
   4352   // | map | len  | wm0  | h0  | wm1 |      hN |
   4353   // +-----+------+------+-----+-----+ ... ----+
   4354   //                 0      1     2        len-1
   4355   //                              ^              ^
   4356   //                              |              |
   4357   //                         pointer_reg      too_far
   4358   //                         aka feedback     scratch2
   4359   // also need receiver_map
   4360   // use cached_map (scratch1) to look in the weak map values.
   4361   __ SmiToPtrArrayOffset(r0, length);
   4362   __ add(too_far, feedback, r0);
   4363   __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   4364   __ addi(pointer_reg, feedback,
   4365           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
   4366 
   4367   __ bind(&next_loop);
   4368   __ LoadP(cached_map, MemOperand(pointer_reg));
   4369   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   4370   __ cmp(receiver_map, cached_map);
   4371   __ bne(&prepare_next);
   4372   __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
   4373   __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   4374   __ Jump(ip);
   4375 
   4376   __ bind(&prepare_next);
   4377   __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
   4378   __ cmp(pointer_reg, too_far);
   4379   __ blt(&next_loop);
   4380 
   4381   // We exhausted our array of map handler pairs.
   4382   __ b(miss);
   4383 }
   4384 
   4385 
   4386 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
   4387                                   Register receiver_map, Register feedback,
   4388                                   Register vector, Register slot,
   4389                                   Register scratch, Label* compare_map,
   4390                                   Label* load_smi_map, Label* try_array) {
   4391   __ JumpIfSmi(receiver, load_smi_map);
   4392   __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   4393   __ bind(compare_map);
   4394   Register cached_map = scratch;
   4395   // Move the weak map into the weak_cell register.
   4396   __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
   4397   __ cmp(cached_map, receiver_map);
   4398   __ bne(try_array);
   4399   Register handler = feedback;
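           // The weak cell matched, so the handler for this map sits in the
           // vector slot immediately after it (header + (slot + 1) pointers).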
   4400   __ SmiToPtrArrayOffset(r0, slot);
   4401   __ add(handler, vector, r0);
   4402   __ LoadP(handler,
   4403            FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
   4404   __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
   4405   __ Jump(ip);
   4406 }
   4407 
   4408 
   4409 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4410   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r4
   4411   Register name = LoadWithVectorDescriptor::NameRegister();          // r5
   4412   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r6
   4413   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r3
   4414   Register feedback = r7;
   4415   Register receiver_map = r8;
   4416   Register scratch1 = r9;
   4417 
   4418   __ SmiToPtrArrayOffset(r0, slot);
   4419   __ add(feedback, vector, r0);
   4420   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4421 
   4422   // Try to quickly handle the monomorphic case without knowing for sure
   4423   // if we have a weak cell in feedback. We do know it's safe to look
   4424   // at WeakCell::kValueOffset.
   4425   Label try_array, load_smi_map, compare_map;
   4426   Label not_array, miss;
   4427   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4428                         scratch1, &compare_map, &load_smi_map, &try_array);
   4429 
   4430   // Is it a fixed array?
   4431   __ bind(&try_array);
   4432   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4433   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   4434   __ bne(&not_array);
   4435   HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
   4436 
   4437   __ bind(&not_array);
   4438   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   4439   __ bne(&miss);
   4440   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
   4441       Code::ComputeHandlerFlags(Code::LOAD_IC));
   4442   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
   4443                                                receiver, name, feedback,
   4444                                                receiver_map, scratch1, r10);
   4445 
   4446   __ bind(&miss);
   4447   LoadIC::GenerateMiss(masm);
   4448 
   4449   __ bind(&load_smi_map);
   4450   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
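           // Smis have no map of their own; use the heap number map so a Smi
           // receiver is dispatched through the same handler as a heap number.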
   4451   __ b(&compare_map);
   4452 }
   4453 
   4454 
   4455 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
   4456   GenerateImpl(masm, false);
   4457 }
   4458 
   4459 
   4460 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4461   GenerateImpl(masm, true);
   4462 }
   4463 
   4464 
   4465 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4466   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r4
   4467   Register key = LoadWithVectorDescriptor::NameRegister();           // r5
   4468   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r6
   4469   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r3
   4470   Register feedback = r7;
   4471   Register receiver_map = r8;
   4472   Register scratch1 = r9;
   4473 
   4474   __ SmiToPtrArrayOffset(r0, slot);
   4475   __ add(feedback, vector, r0);
   4476   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4477 
   4478   // Try to quickly handle the monomorphic case without knowing for sure
   4479   // if we have a weak cell in feedback. We do know it's safe to look
   4480   // at WeakCell::kValueOffset.
   4481   Label try_array, load_smi_map, compare_map;
   4482   Label not_array, miss;
   4483   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4484                         scratch1, &compare_map, &load_smi_map, &try_array);
   4485 
   4486   __ bind(&try_array);
   4487   // Is it a fixed array?
   4488   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4489   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   4490   __ bne(&not_array);
   4491 
   4492   // We have a polymorphic element handler.
   4493   Label polymorphic, try_poly_name;
   4494   __ bind(&polymorphic);
   4495   HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
   4496 
   4497   __ bind(&not_array);
   4498   // Is it generic?
   4499   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   4500   __ bne(&try_poly_name);
   4501   Handle<Code> megamorphic_stub =
   4502       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   4503   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   4504 
   4505   __ bind(&try_poly_name);
   4506   // We might have a name in feedback, and a fixed array in the next slot.
   4507   __ cmp(key, feedback);
   4508   __ bne(&miss);
   4509   // If the name comparison succeeded, we know we have a fixed array with
   4510   // at least one map/handler pair.
   4511   __ SmiToPtrArrayOffset(r0, slot);
   4512   __ add(feedback, vector, r0);
   4513   __ LoadP(feedback,
   4514            FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   4515   HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
   4516 
   4517   __ bind(&miss);
   4518   KeyedLoadIC::GenerateMiss(masm);
   4519 
   4520   __ bind(&load_smi_map);
   4521   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   4522   __ b(&compare_map);
   4523 }
   4524 
   4525 
   4526 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   4527   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   4528   VectorStoreICStub stub(isolate(), state());
   4529   stub.GenerateForTrampoline(masm);
   4530 }
   4531 
   4532 
   4533 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
   4534   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
   4535   VectorKeyedStoreICStub stub(isolate(), state());
   4536   stub.GenerateForTrampoline(masm);
   4537 }
   4538 
   4539 
   4540 void VectorStoreICStub::Generate(MacroAssembler* masm) {
   4541   GenerateImpl(masm, false);
   4542 }
   4543 
   4544 
   4545 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4546   GenerateImpl(masm, true);
   4547 }
   4548 
   4549 
   4550 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4551   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r4
   4552   Register key = VectorStoreICDescriptor::NameRegister();           // r5
   4553   Register vector = VectorStoreICDescriptor::VectorRegister();      // r6
   4554   Register slot = VectorStoreICDescriptor::SlotRegister();          // r7
   4555   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3));          // r3
   4556   Register feedback = r8;
   4557   Register receiver_map = r9;
   4558   Register scratch1 = r10;
   4559 
   4560   __ SmiToPtrArrayOffset(r0, slot);
   4561   __ add(feedback, vector, r0);
   4562   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4563 
   4564   // Try to quickly handle the monomorphic case without knowing for sure
   4565   // if we have a weak cell in feedback. We do know it's safe to look
   4566   // at WeakCell::kValueOffset.
   4567   Label try_array, load_smi_map, compare_map;
   4568   Label not_array, miss;
   4569   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4570                         scratch1, &compare_map, &load_smi_map, &try_array);
   4571 
   4572   // Is it a fixed array?
   4573   __ bind(&try_array);
   4574   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4575   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   4576   __ bne(&not_array);
   4577 
   4578   Register scratch2 = r11;
   4579   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
   4580                    &miss);
   4581 
   4582   __ bind(&not_array);
   4583   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   4584   __ bne(&miss);
   4585   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
   4586       Code::ComputeHandlerFlags(Code::STORE_IC));
   4587   masm->isolate()->stub_cache()->GenerateProbe(
   4588       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
   4589       scratch1, scratch2);
   4590 
   4591   __ bind(&miss);
   4592   StoreIC::GenerateMiss(masm);
   4593 
   4594   __ bind(&load_smi_map);
   4595   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   4596   __ b(&compare_map);
   4597 }
   4598 
   4599 
   4600 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
   4601   GenerateImpl(masm, false);
   4602 }
   4603 
   4604 
   4605 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
   4606   GenerateImpl(masm, true);
   4607 }
   4608 
   4609 
   4610 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
   4611                                        Register receiver_map, Register scratch1,
   4612                                        Register scratch2, Label* miss) {
   4613   // feedback initially contains the feedback array
   4614   Label next_loop, prepare_next;
   4615   Label start_polymorphic;
   4616   Label transition_call;
   4617 
   4618   Register cached_map = scratch1;
   4619   Register too_far = scratch2;
   4620   Register pointer_reg = feedback;
   4621   __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
   4622 
   4623   // +-----+------+------+-----+-----+-----+ ... ----+
   4624   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
   4625   // +-----+------+------+-----+-----+ ----+ ... ----+
   4626   //                 0      1     2              len-1
   4627   //                 ^                                 ^
   4628   //                 |                                 |
   4629   //             pointer_reg                        too_far
   4630   //             aka feedback                       scratch2
   4631   // also need receiver_map
   4632   // use cached_map (scratch1) to look in the weak map values.
   4633   __ SmiToPtrArrayOffset(r0, too_far);
   4634   __ add(too_far, feedback, r0);
   4635   __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   4636   __ addi(pointer_reg, feedback,
   4637           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
   4638 
   4639   __ bind(&next_loop);
   4640   __ LoadP(cached_map, MemOperand(pointer_reg));
   4641   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
   4642   __ cmp(receiver_map, cached_map);
   4643   __ bne(&prepare_next);
   4644   // Is it a transitioning store?
   4645   __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
   4646   __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
   4647   __ bne(&transition_call);
   4648   __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
   4649   __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
   4650   __ Jump(ip);
   4651 
   4652   __ bind(&transition_call);
   4653   __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
   4654   __ JumpIfSmi(too_far, miss);
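           // (A Smi here presumably means the transition map's weak cell has
           // been cleared, so the transitioning store cannot be handled inline.)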
   4655 
   4656   __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
   4657 
   4658   // Load the map into the correct register.
   4659   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
   4660   __ mr(feedback, too_far);
   4661 
   4662   __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
   4663   __ Jump(ip);
   4664 
   4665   __ bind(&prepare_next);
   4666   __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
   4667   __ cmpl(pointer_reg, too_far);
   4668   __ blt(&next_loop);
   4669 
   4670   // We exhausted our array of map handler pairs.
   4671   __ b(miss);
   4672 }
   4673 
   4674 
   4675 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   4676   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r4
   4677   Register key = VectorStoreICDescriptor::NameRegister();           // r5
   4678   Register vector = VectorStoreICDescriptor::VectorRegister();      // r6
   4679   Register slot = VectorStoreICDescriptor::SlotRegister();          // r7
   4680   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3));          // r3
   4681   Register feedback = r8;
   4682   Register receiver_map = r9;
   4683   Register scratch1 = r10;
   4684 
   4685   __ SmiToPtrArrayOffset(r0, slot);
   4686   __ add(feedback, vector, r0);
   4687   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
   4688 
   4689   // Try to quickly handle the monomorphic case without knowing for sure
   4690   // if we have a weak cell in feedback. We do know it's safe to look
   4691   // at WeakCell::kValueOffset.
   4692   Label try_array, load_smi_map, compare_map;
   4693   Label not_array, miss;
   4694   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
   4695                         scratch1, &compare_map, &load_smi_map, &try_array);
   4696 
   4697   __ bind(&try_array);
   4698   // Is it a fixed array?
   4699   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
   4700   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
   4701   __ bne(&not_array);
   4702 
   4703   // We have a polymorphic element handler.
   4704   Label polymorphic, try_poly_name;
   4705   __ bind(&polymorphic);
   4706 
   4707   Register scratch2 = r11;
   4708 
   4709   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
   4710                              &miss);
   4711 
   4712   __ bind(&not_array);
   4713   // Is it generic?
   4714   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   4715   __ bne(&try_poly_name);
   4716   Handle<Code> megamorphic_stub =
   4717       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   4718   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
   4719 
   4720   __ bind(&try_poly_name);
   4721   // We might have a name in feedback, and a fixed array in the next slot.
   4722   __ cmp(key, feedback);
   4723   __ bne(&miss);
   4724   // If the name comparison succeeded, we know we have a fixed array with
   4725   // at least one map/handler pair.
   4726   __ SmiToPtrArrayOffset(r0, slot);
   4727   __ add(feedback, vector, r0);
   4728   __ LoadP(feedback,
   4729            FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   4730   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
   4731                    &miss);
   4732 
   4733   __ bind(&miss);
   4734   KeyedStoreIC::GenerateMiss(masm);
   4735 
   4736   __ bind(&load_smi_map);
   4737   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
   4738   __ b(&compare_map);
   4739 }
   4740 
   4741 
   4742 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   4743   if (masm->isolate()->function_entry_hook() != NULL) {
   4744     PredictableCodeSizeScope predictable(masm,
   4745 #if V8_TARGET_ARCH_PPC64
   4746                                          14 * Assembler::kInstrSize);
   4747 #else
   4748                                          11 * Assembler::kInstrSize);
   4749 #endif
   4750     ProfileEntryHookStub stub(masm->isolate());
   4751     __ mflr(r0);
   4752     __ Push(r0, ip);
   4753     __ CallStub(&stub);
   4754     __ Pop(r0, ip);
   4755     __ mtlr(r0);
   4756   }
   4757 }
   4758 
   4759 
   4760 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   4761   // The entry hook is a "push lr, ip" instruction, followed by a call.
   4762   const int32_t kReturnAddressDistanceFromFunctionStart =
   4763       Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
   4764 
   4765   // This should contain all kJSCallerSaved registers.
   4766   const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
   4767                              r15.bit();        // Saved stack pointer.
   4768 
   4769   // We also save lr, so the count here is one higher than the mask indicates.
   4770   const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
   4771 
   4772   // Save all caller-save registers as this may be called from anywhere.
   4773   __ mflr(ip);
   4774   __ MultiPush(kSavedRegs | ip.bit());
   4775 
   4776   // Compute the function's address for the first argument.
   4777   __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
   4778 
   4779   // The caller's return address is two slots above the saved temporaries.
   4780   // Grab that for the second argument to the hook.
   4781   __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
   4782 
   4783   // Align the stack if necessary.
   4784   int frame_alignment = masm->ActivationFrameAlignment();
   4785   if (frame_alignment > kPointerSize) {
   4786     __ mr(r15, sp);
   4787     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   4788     __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
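             // The original sp is parked in r15 for the duration of the C call
             // (the caller's r15 was saved by MultiPush above) and is restored
             // once the call returns.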
   4789   }
   4790 
   4791 #if !defined(USE_SIMULATOR)
   4792   uintptr_t entry_hook =
   4793       reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
   4794   __ mov(ip, Operand(entry_hook));
   4795 
   4796 #if ABI_USES_FUNCTION_DESCRIPTORS
   4797   // Function descriptor
   4798   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
   4799   __ LoadP(ip, MemOperand(ip, 0));
   4800 #elif ABI_CALL_VIA_IP
   4801 // ip set above, so nothing to do.
   4802 #endif
   4803 
   4804   // PPC LINUX ABI:
   4805   __ li(r0, Operand::Zero());
   4806   __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
   4807 #else
   4808   // Under the simulator we need to indirect the entry hook through a
   4809   // trampoline function at a known address.
   4810   // It additionally takes an isolate as a third parameter
   4811   __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
   4812 
   4813   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
   4814   __ mov(ip, Operand(ExternalReference(
   4815                  &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
   4816 #endif
   4817   __ Call(ip);
   4818 
   4819 #if !defined(USE_SIMULATOR)
   4820   __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
   4821 #endif
   4822 
   4823   // Restore the stack pointer if needed.
   4824   if (frame_alignment > kPointerSize) {
   4825     __ mr(sp, r15);
   4826   }
   4827 
   4828   // Also pop lr to get Ret(0).
   4829   __ MultiPop(kSavedRegs | ip.bit());
   4830   __ mtlr(ip);
   4831   __ Ret();
   4832 }
   4833 
   4834 
   4835 template <class T>
   4836 static void CreateArrayDispatch(MacroAssembler* masm,
   4837                                 AllocationSiteOverrideMode mode) {
   4838   if (mode == DISABLE_ALLOCATION_SITES) {
   4839     T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
   4840     __ TailCallStub(&stub);
   4841   } else if (mode == DONT_OVERRIDE) {
   4842     int last_index =
   4843         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   4844     for (int i = 0; i <= last_index; ++i) {
   4845       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4846       __ Cmpi(r6, Operand(kind), r0);
   4847       T stub(masm->isolate(), kind);
   4848       __ TailCallStub(&stub, eq);
   4849     }
   4850 
   4851     // If we reached this point there is a problem.
   4852     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4853   } else {
   4854     UNREACHABLE();
   4855   }
   4856 }
   4857 
   4858 
   4859 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   4860                                            AllocationSiteOverrideMode mode) {
   4861   // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
   4862   // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
   4863   // r3 - number of arguments
   4864   // r4 - constructor?
   4865   // sp[0] - last argument
   4866   Label normal_sequence;
   4867   if (mode == DONT_OVERRIDE) {
   4868     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   4869     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   4870     STATIC_ASSERT(FAST_ELEMENTS == 2);
   4871     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   4872     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
   4873     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
   4874 
   4875     // is the low bit set? If so, we are holey and that is good.
   4876     __ andi(r0, r6, Operand(1));
   4877     __ bne(&normal_sequence, cr0);
   4878   }
   4879 
   4880   // look at the first argument
   4881   __ LoadP(r8, MemOperand(sp, 0));
   4882   __ cmpi(r8, Operand::Zero());
   4883   __ beq(&normal_sequence);
   4884 
   4885   if (mode == DISABLE_ALLOCATION_SITES) {
   4886     ElementsKind initial = GetInitialFastElementsKind();
   4887     ElementsKind holey_initial = GetHoleyElementsKind(initial);
   4888 
   4889     ArraySingleArgumentConstructorStub stub_holey(
   4890         masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
   4891     __ TailCallStub(&stub_holey);
   4892 
   4893     __ bind(&normal_sequence);
   4894     ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
   4895                                             DISABLE_ALLOCATION_SITES);
   4896     __ TailCallStub(&stub);
   4897   } else if (mode == DONT_OVERRIDE) {
   4898     // We are going to create a holey array, but our kind is non-holey.
   4899     // Fix kind and retry (only if we have an allocation site in the slot).
   4900     __ addi(r6, r6, Operand(1));
   4901 
   4902     if (FLAG_debug_code) {
   4903       __ LoadP(r8, FieldMemOperand(r5, 0));
   4904       __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
   4905       __ Assert(eq, kExpectedAllocationSite);
   4906     }
   4907 
   4908     // Save the resulting elements kind in type info. We can't just store r6
   4909     // in the AllocationSite::transition_info field because elements kind is
   4910     // restricted to a portion of the field...upper bits need to be left alone.
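             // AddSmiLiteral with kFastElementsKindPackedToHoley (the packed ->
             // holey distance, mirroring the +1 applied to r6 above) nudges just
             // the ElementsKindBits at the bottom of transition_info.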
   4911     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   4912     __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
   4913     __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
   4914     __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
   4915               r0);
   4916 
   4917     __ bind(&normal_sequence);
   4918     int last_index =
   4919         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   4920     for (int i = 0; i <= last_index; ++i) {
   4921       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4922       __ mov(r0, Operand(kind));
   4923       __ cmp(r6, r0);
   4924       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
   4925       __ TailCallStub(&stub, eq);
   4926     }
   4927 
   4928     // If we reached this point there is a problem.
   4929     __ Abort(kUnexpectedElementsKindInArrayConstructor);
   4930   } else {
   4931     UNREACHABLE();
   4932   }
   4933 }
   4934 
   4935 
   4936 template <class T>
   4937 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
   4938   int to_index =
   4939       GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   4940   for (int i = 0; i <= to_index; ++i) {
   4941     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
   4942     T stub(isolate, kind);
   4943     stub.GetCode();
   4944     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
   4945       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
   4946       stub1.GetCode();
   4947     }
   4948   }
   4949 }
   4950 
   4951 
   4952 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
   4953   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
   4954       isolate);
   4955   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
   4956       isolate);
   4957   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
   4958       isolate);
   4959 }
   4960 
   4961 
   4962 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
   4963     Isolate* isolate) {
   4964   ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
   4965   for (int i = 0; i < 2; i++) {
   4966     // For internal arrays we only need a few things
   4967     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
   4968     stubh1.GetCode();
   4969     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
   4970     stubh2.GetCode();
   4971     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
   4972     stubh3.GetCode();
   4973   }
   4974 }
   4975 
   4976 
   4977 void ArrayConstructorStub::GenerateDispatchToArrayStub(
   4978     MacroAssembler* masm, AllocationSiteOverrideMode mode) {
   4979   if (argument_count() == ANY) {
   4980     Label not_zero_case, not_one_case;
   4981     __ cmpi(r3, Operand::Zero());
   4982     __ bne(&not_zero_case);
   4983     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   4984 
   4985     __ bind(&not_zero_case);
   4986     __ cmpi(r3, Operand(1));
   4987     __ bgt(&not_one_case);
   4988     CreateArrayDispatchOneArgument(masm, mode);
   4989 
   4990     __ bind(&not_one_case);
   4991     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   4992   } else if (argument_count() == NONE) {
   4993     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   4994   } else if (argument_count() == ONE) {
   4995     CreateArrayDispatchOneArgument(masm, mode);
   4996   } else if (argument_count() == MORE_THAN_ONE) {
   4997     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   4998   } else {
   4999     UNREACHABLE();
   5000   }
   5001 }
   5002 
   5003 
   5004 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   5005   // ----------- S t a t e -------------
   5006   //  -- r3 : argc (only if argument_count() == ANY)
   5007   //  -- r4 : constructor
   5008   //  -- r5 : AllocationSite or undefined
   5009   //  -- r6 : new target
   5010   //  -- sp[0] : return address
   5011   //  -- sp[4] : last argument
   5012   // -----------------------------------
   5013 
   5014   if (FLAG_debug_code) {
   5015     // The array construct code is only set for the global and natives
   5016     // builtin Array functions which always have maps.
   5017 
   5018     // Initial map for the builtin Array function should be a map.
   5019     __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
    5020     // The Smi test below catches both a NULL and a Smi initial map.
   5021     __ TestIfSmi(r7, r0);
   5022     __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
   5023     __ CompareObjectType(r7, r7, r8, MAP_TYPE);
   5024     __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
   5025 
   5026     // We should either have undefined in r5 or a valid AllocationSite
   5027     __ AssertUndefinedOrAllocationSite(r5, r7);
   5028   }
   5029 
   5030   // Enter the context of the Array function.
   5031   __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
   5032 
   5033   Label subclassing;
   5034   __ cmp(r6, r4);
   5035   __ bne(&subclassing);
   5036 
   5037   Label no_info;
   5038   // Get the elements kind and case on that.
   5039   __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
   5040   __ beq(&no_info);
   5041 
   5042   __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
   5043   __ SmiUntag(r6);
   5044   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
   5045   __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
   5046   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
   5047 
   5048   __ bind(&no_info);
   5049   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
   5050 
   5051   __ bind(&subclassing);
   5052   switch (argument_count()) {
   5053     case ANY:
   5054     case MORE_THAN_ONE:
   5055       __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
   5056       __ StorePX(r4, MemOperand(sp, r0));
   5057       __ addi(r3, r3, Operand(3));
   5058       break;
   5059     case NONE:
   5060       __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
   5061       __ li(r3, Operand(3));
   5062       break;
   5063     case ONE:
   5064       __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
   5065       __ li(r3, Operand(4));
   5066       break;
   5067   }
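           // In every case the constructor now sits in the stack slot just
           // above the arguments and r3 has grown by 3: that slot plus the new
           // target and allocation site pushed below are the extra values
           // handed to Runtime::kNewArray.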
   5068 
   5069   __ Push(r6, r5);
   5070   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
   5071 }
   5072 
   5073 
   5074 void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
   5075                                                 ElementsKind kind) {
   5076   __ cmpli(r3, Operand(1));
   5077 
   5078   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   5079   __ TailCallStub(&stub0, lt);
   5080 
   5081   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
   5082   __ TailCallStub(&stubN, gt);
   5083 
   5084   if (IsFastPackedElementsKind(kind)) {
   5085     // We might need to create a holey array
   5086     // look at the first argument
   5087     __ LoadP(r6, MemOperand(sp, 0));
   5088     __ cmpi(r6, Operand::Zero());
   5089 
   5090     InternalArraySingleArgumentConstructorStub stub1_holey(
   5091         isolate(), GetHoleyElementsKind(kind));
   5092     __ TailCallStub(&stub1_holey, ne);
   5093   }
   5094 
   5095   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
   5096   __ TailCallStub(&stub1);
   5097 }
   5098 
   5099 
   5100 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   5101   // ----------- S t a t e -------------
   5102   //  -- r3 : argc
   5103   //  -- r4 : constructor
   5104   //  -- sp[0] : return address
   5105   //  -- sp[4] : last argument
   5106   // -----------------------------------
   5107 
   5108   if (FLAG_debug_code) {
   5109     // The array construct code is only set for the global and natives
   5110     // builtin Array functions which always have maps.
   5111 
   5112     // Initial map for the builtin Array function should be a map.
   5113     __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
   5114     // Will both indicate a NULL and a Smi.
   5115     __ TestIfSmi(r6, r0);
   5116     __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
   5117     __ CompareObjectType(r6, r6, r7, MAP_TYPE);
   5118     __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
   5119   }
   5120 
   5121   // Figure out the right elements kind
   5122   __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
    5123   // Load the map's "bit field 2" into r6.
   5124   __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
   5125   // Retrieve elements_kind from bit field 2.
   5126   __ DecodeField<Map::ElementsKindBits>(r6);
   5127 
   5128   if (FLAG_debug_code) {
   5129     Label done;
   5130     __ cmpi(r6, Operand(FAST_ELEMENTS));
   5131     __ beq(&done);
   5132     __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
   5133     __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
   5134     __ bind(&done);
   5135   }
   5136 
   5137   Label fast_elements_case;
   5138   __ cmpi(r6, Operand(FAST_ELEMENTS));
   5139   __ beq(&fast_elements_case);
   5140   GenerateCase(masm, FAST_HOLEY_ELEMENTS);
   5141 
   5142   __ bind(&fast_elements_case);
   5143   GenerateCase(masm, FAST_ELEMENTS);
   5144 }
   5145 
   5146 
   5147 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   5148   Register context = cp;
   5149   Register result = r3;
   5150   Register slot = r5;
   5151 
   5152   // Go up the context chain to the script context.
   5153   for (int i = 0; i < depth(); ++i) {
   5154     __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
   5155     context = result;
   5156   }
   5157 
   5158   // Load the PropertyCell value at the specified slot.
   5159   __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
   5160   __ add(result, context, r0);
   5161   __ LoadP(result, ContextMemOperand(result));
   5162   __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
   5163 
   5164   // If the result is not the_hole, return. Otherwise, handle in the runtime.
   5165   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   5166   __ Ret(ne);
   5167 
   5168   // Fallback to runtime.
   5169   __ SmiTag(slot);
   5170   __ Push(slot);
   5171   __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
   5172 }
   5173 
   5174 
   5175 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
   5176   Register value = r3;
   5177   Register slot = r5;
   5178 
   5179   Register cell = r4;
   5180   Register cell_details = r6;
   5181   Register cell_value = r7;
   5182   Register cell_value_map = r8;
   5183   Register scratch = r9;
   5184 
   5185   Register context = cp;
   5186   Register context_temp = cell;
   5187 
   5188   Label fast_heapobject_case, fast_smi_case, slow_case;
   5189 
   5190   if (FLAG_debug_code) {
   5191     __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
   5192     __ Check(ne, kUnexpectedValue);
   5193   }
   5194 
   5195   // Go up the context chain to the script context.
   5196   for (int i = 0; i < depth(); i++) {
   5197     __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
   5198     context = context_temp;
   5199   }
   5200 
   5201   // Load the PropertyCell at the specified slot.
   5202   __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
   5203   __ add(cell, context, r0);
   5204   __ LoadP(cell, ContextMemOperand(cell));
   5205 
   5206   // Load PropertyDetails for the cell (actually only the cell_type and kind).
   5207   __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
   5208   __ SmiUntag(cell_details);
   5209   __ andi(cell_details, cell_details,
   5210           Operand(PropertyDetails::PropertyCellTypeField::kMask |
   5211                   PropertyDetails::KindField::kMask |
   5212                   PropertyDetails::kAttributesReadOnlyMask));
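           // Only the cell type, the property kind and the read-only bit
           // survive the mask, so the comparisons below can test against fully
           // encoded constants.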
   5213 
   5214   // Check if PropertyCell holds mutable data.
   5215   Label not_mutable_data;
   5216   __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   5217                                     PropertyCellType::kMutable) |
   5218                                 PropertyDetails::KindField::encode(kData)));
   5219   __ bne(&not_mutable_data);
   5220   __ JumpIfSmi(value, &fast_smi_case);
   5221 
   5222   __ bind(&fast_heapobject_case);
   5223   __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
   5224   // RecordWriteField clobbers the value register, so we copy it before the
   5225   // call.
   5226   __ mr(r6, value);
   5227   __ RecordWriteField(cell, PropertyCell::kValueOffset, r6, scratch,
   5228                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
   5229                       OMIT_SMI_CHECK);
   5230   __ Ret();
   5231 
   5232   __ bind(&not_mutable_data);
   5233   // Check if PropertyCell value matches the new value (relevant for Constant,
   5234   // ConstantType and Undefined cells).
   5235   Label not_same_value;
   5236   __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
   5237   __ cmp(cell_value, value);
   5238   __ bne(&not_same_value);
   5239 
   5240   // Make sure the PropertyCell is not marked READ_ONLY.
   5241   __ andi(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
   5242   __ bne(&slow_case, cr0);
   5243 
   5244   if (FLAG_debug_code) {
   5245     Label done;
   5246     // This can only be true for Constant, ConstantType and Undefined cells,
   5247     // because we never store the_hole via this stub.
   5248     __ cmpi(cell_details,
   5249             Operand(PropertyDetails::PropertyCellTypeField::encode(
   5250                         PropertyCellType::kConstant) |
   5251                     PropertyDetails::KindField::encode(kData)));
   5252     __ beq(&done);
   5253     __ cmpi(cell_details,
   5254             Operand(PropertyDetails::PropertyCellTypeField::encode(
   5255                         PropertyCellType::kConstantType) |
   5256                     PropertyDetails::KindField::encode(kData)));
   5257     __ beq(&done);
   5258     __ cmpi(cell_details,
   5259             Operand(PropertyDetails::PropertyCellTypeField::encode(
   5260                         PropertyCellType::kUndefined) |
   5261                     PropertyDetails::KindField::encode(kData)));
   5262     __ Check(eq, kUnexpectedValue);
   5263     __ bind(&done);
   5264   }
   5265   __ Ret();
   5266   __ bind(&not_same_value);
   5267 
   5268   // Check if PropertyCell contains data with constant type (and is not
   5269   // READ_ONLY).
   5270   __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
   5271                                     PropertyCellType::kConstantType) |
   5272                                 PropertyDetails::KindField::encode(kData)));
   5273   __ bne(&slow_case);
   5274 
   5275   // Now either both old and new values must be smis or both must be heap
   5276   // objects with the same map.
   5277   Label value_is_heap_object;
   5278   __ JumpIfNotSmi(value, &value_is_heap_object);
   5279   __ JumpIfNotSmi(cell_value, &slow_case);
   5280   // Old and new values are smis, no need for a write barrier here.
   5281   __ bind(&fast_smi_case);
   5282   __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
   5283   __ Ret();
   5284 
   5285   __ bind(&value_is_heap_object);
   5286   __ JumpIfSmi(cell_value, &slow_case);
   5287 
   5288   __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
   5289   __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
   5290   __ cmp(cell_value_map, scratch);
   5291   __ beq(&fast_heapobject_case);
   5292 
   5293   // Fallback to runtime.
   5294   __ bind(&slow_case);
   5295   __ SmiTag(slot);
   5296   __ Push(slot, value);
   5297   __ TailCallRuntime(is_strict(language_mode())
   5298                          ? Runtime::kStoreGlobalViaContext_Strict
   5299                          : Runtime::kStoreGlobalViaContext_Sloppy);
   5300 }
   5301 
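        // Illustrative sketch (not part of the generated code): the branch structure
        // above corresponds roughly to the C++ below.  DetailsOf(), MapOf(), Encode()
        // and set_value() are hypothetical shorthands for the field loads, stores and
        // PropertyDetails bit-field encodings done with LoadP/StoreP in the stub.
        //
        //   void StoreGlobalViaContext(Context* script_ctx, int slot, Object* value,
        //                              bool strict) {
        //     PropertyCell* cell = PropertyCell::cast(script_ctx->get(slot));
        //     int details = DetailsOf(cell);  // cell_type | kind | READ_ONLY bit
        //     if (details == Encode(kMutable, kData)) {
        //       cell->set_value(value);  // fast path, write barrier for heap objects
        //       return;
        //     }
        //     if (cell->value() == value) {
        //       if (!(details & READ_ONLY)) return;  // same value: nothing to store
        //     } else if (details == Encode(kConstantType, kData)) {
        //       bool both_smis = value->IsSmi() && cell->value()->IsSmi();
        //       bool same_map = !value->IsSmi() && !cell->value()->IsSmi() &&
        //                       MapOf(value) == MapOf(cell->value());
        //       if (both_smis || same_map) {
        //         cell->set_value(value);
        //         return;
        //       }
        //     }
        //     CallRuntime(strict ? kStoreGlobalViaContext_Strict
        //                        : kStoreGlobalViaContext_Sloppy, slot, value);
        //   }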
   5302 
   5303 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   5304   return ref0.address() - ref1.address();
   5305 }
   5306 
   5307 
   5308 // Calls an API function.  Allocates a HandleScope, extracts the returned
   5309 // value from the handle and propagates exceptions.  Restores the context.
   5310 // stack_space is the space to be unwound on exit (it includes the call's JS
   5311 // arguments space and the additional space allocated for the fast call).
   5312 static void CallApiFunctionAndReturn(MacroAssembler* masm,
   5313                                      Register function_address,
   5314                                      ExternalReference thunk_ref,
   5315                                      int stack_space,
   5316                                      MemOperand* stack_space_operand,
   5317                                      MemOperand return_value_operand,
   5318                                      MemOperand* context_restore_operand) {
   5319   Isolate* isolate = masm->isolate();
   5320   ExternalReference next_address =
   5321       ExternalReference::handle_scope_next_address(isolate);
   5322   const int kNextOffset = 0;
   5323   const int kLimitOffset = AddressOffset(
   5324       ExternalReference::handle_scope_limit_address(isolate), next_address);
   5325   const int kLevelOffset = AddressOffset(
   5326       ExternalReference::handle_scope_level_address(isolate), next_address);
   5327 
   5328   // Additional parameter is the address of the actual callback.
   5329   DCHECK(function_address.is(r4) || function_address.is(r5));
   5330   Register scratch = r6;
   5331 
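          // If the profiler is active (the is_profiling flag is non-zero), route the
          // call through the invocation thunk so the profiler can record the external
          // callback; otherwise call the API function address directly.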
   5332   __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
   5333   __ lbz(scratch, MemOperand(scratch, 0));
   5334   __ cmpi(scratch, Operand::Zero());
   5335 
   5336   if (CpuFeatures::IsSupported(ISELECT)) {
   5337     __ mov(scratch, Operand(thunk_ref));
   5338     __ isel(eq, scratch, function_address, scratch);
   5339   } else {
   5340     Label profiler_disabled;
   5341     Label end_profiler_check;
   5342     __ beq(&profiler_disabled);
   5343     __ mov(scratch, Operand(thunk_ref));
   5344     __ b(&end_profiler_check);
   5345     __ bind(&profiler_disabled);
   5346     __ mr(scratch, function_address);
   5347     __ bind(&end_profiler_check);
   5348   }
   5349 
   5350   // Allocate HandleScope in callee-save registers.
   5351   // r17 - next_address
   5352   // r14 - next_address->kNextOffset
   5353   // r15 - next_address->kLimitOffset
   5354   // r16 - next_address->kLevelOffset
   5355   __ mov(r17, Operand(next_address));
   5356   __ LoadP(r14, MemOperand(r17, kNextOffset));
   5357   __ LoadP(r15, MemOperand(r17, kLimitOffset));
   5358   __ lwz(r16, MemOperand(r17, kLevelOffset));
   5359   __ addi(r16, r16, Operand(1));
   5360   __ stw(r16, MemOperand(r17, kLevelOffset));
   5361 
   5362   if (FLAG_log_timer_events) {
   5363     FrameScope frame(masm, StackFrame::MANUAL);
   5364     __ PushSafepointRegisters();
   5365     __ PrepareCallCFunction(1, r3);
   5366     __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
   5367     __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
   5368                      1);
   5369     __ PopSafepointRegisters();
   5370   }
   5371 
   5372   // Native call returns to the DirectCEntry stub which redirects to the
   5373   // return address pushed on stack (could have moved after GC).
   5374   // DirectCEntry stub itself is generated early and never moves.
   5375   DirectCEntryStub stub(isolate);
   5376   stub.GenerateCall(masm, scratch);
   5377 
   5378   if (FLAG_log_timer_events) {
   5379     FrameScope frame(masm, StackFrame::MANUAL);
   5380     __ PushSafepointRegisters();
   5381     __ PrepareCallCFunction(1, r3);
   5382     __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
   5383     __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
   5384                      1);
   5385     __ PopSafepointRegisters();
   5386   }
   5387 
   5388   Label promote_scheduled_exception;
   5389   Label delete_allocated_handles;
   5390   Label leave_exit_frame;
   5391   Label return_value_loaded;
   5392 
   5393   // Load the value from ReturnValue.
   5394   __ LoadP(r3, return_value_operand);
   5395   __ bind(&return_value_loaded);
   5396   // No more valid handles (the result handle was the last one). Restore
   5397   // previous handle scope.
   5398   __ StoreP(r14, MemOperand(r17, kNextOffset));
   5399   if (__ emit_debug_code()) {
   5400     __ lwz(r4, MemOperand(r17, kLevelOffset));
   5401     __ cmp(r4, r16);
   5402     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
   5403   }
   5404   __ subi(r16, r16, Operand(1));
   5405   __ stw(r16, MemOperand(r17, kLevelOffset));
   5406   __ LoadP(r0, MemOperand(r17, kLimitOffset));
   5407   __ cmp(r15, r0);
   5408   __ bne(&delete_allocated_handles);
   5409 
   5410   // Leave the API exit frame.
   5411   __ bind(&leave_exit_frame);
   5412   bool restore_context = context_restore_operand != NULL;
   5413   if (restore_context) {
   5414     __ LoadP(cp, *context_restore_operand);
   5415   }
   5416   // LeaveExitFrame expects unwind space to be in a register.
   5417   if (stack_space_operand != NULL) {
   5418     __ lwz(r14, *stack_space_operand);
   5419   } else {
   5420     __ mov(r14, Operand(stack_space));
   5421   }
   5422   __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
   5423 
   5424   // Check if the function scheduled an exception.
   5425   __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
   5426   __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
   5427   __ LoadP(r15, MemOperand(r15));
   5428   __ cmp(r14, r15);
   5429   __ bne(&promote_scheduled_exception);
   5430 
   5431   __ blr();
   5432 
   5433   // Re-throw by promoting a scheduled exception.
   5434   __ bind(&promote_scheduled_exception);
   5435   __ TailCallRuntime(Runtime::kPromoteScheduledException);
   5436 
   5437   // HandleScope limit has changed. Delete allocated extensions.
   5438   __ bind(&delete_allocated_handles);
   5439   __ StoreP(r15, MemOperand(r17, kLimitOffset));
   5440   __ mr(r14, r3);
   5441   __ PrepareCallCFunction(1, r15);
   5442   __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
   5443   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
   5444                    1);
   5445   __ mr(r3, r14);
   5446   __ b(&leave_exit_frame);
   5447 }
   5448 
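        // Illustrative sketch (not part of the stub): the HandleScope bookkeeping done
        // in assembly above corresponds roughly to the C++ below, operating on the
        // fields that the handle_scope_{next,limit,level}_address references point at.
        //
        //   HandleScopeData* data = isolate->handle_scope_data();
        //   Object** prev_next = data->next;    // kept in r14
        //   Object** prev_limit = data->limit;  // kept in r15
        //   data->level++;                      // kept in r16
        //   CallApiFunction();                  // via DirectCEntryStub
        //   Object* result = *return_value_slot;
        //   data->next = prev_next;
        //   data->level--;
        //   if (data->limit != prev_limit) {
        //     // The callback opened additional handle blocks; free them.
        //     data->limit = prev_limit;
        //     HandleScope::DeleteExtensions(isolate);
        //   }
        //   if (isolate->has_scheduled_exception()) {
        //     return PromoteScheduledException();  // re-throw via the runtime
        //   }
        //   return result;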
   5449 
   5450 static void CallApiFunctionStubHelper(MacroAssembler* masm,
   5451                                       const ParameterCount& argc,
   5452                                       bool return_first_arg,
   5453                                       bool call_data_undefined) {
   5454   // ----------- S t a t e -------------
   5455   //  -- r3                  : callee
   5456   //  -- r7                  : call_data
   5457   //  -- r5                  : holder
   5458   //  -- r4                  : api_function_address
   5459   //  -- r6                  : number of arguments if argc is a register
   5460   //  -- cp                  : context
   5461   //  --
   5462   //  -- sp[0]               : last argument
   5463   //  -- ...
   5464   //  -- sp[(argc - 1) * 4]  : first argument
   5465   //  -- sp[argc * 4]        : receiver
   5466   // -----------------------------------
   5467 
   5468   Register callee = r3;
   5469   Register call_data = r7;
   5470   Register holder = r5;
   5471   Register api_function_address = r4;
   5472   Register context = cp;
   5473 
   5474   typedef FunctionCallbackArguments FCA;
   5475 
   5476   STATIC_ASSERT(FCA::kContextSaveIndex == 6);
   5477   STATIC_ASSERT(FCA::kCalleeIndex == 5);
   5478   STATIC_ASSERT(FCA::kDataIndex == 4);
   5479   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
   5480   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   5481   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   5482   STATIC_ASSERT(FCA::kHolderIndex == 0);
   5483   STATIC_ASSERT(FCA::kArgsLength == 7);
   5484 
   5485   DCHECK(argc.is_immediate() || r3.is(argc.reg()));
   5486 
   5487   // context save
   5488   __ push(context);
   5489   // load context from callee
   5490   __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   5491 
   5492   // callee
   5493   __ push(callee);
   5494 
   5495   // call data
   5496   __ push(call_data);
   5497 
   5498   Register scratch = call_data;
   5499   if (!call_data_undefined) {
   5500     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   5501   }
   5502   // return value
   5503   __ push(scratch);
   5504   // return value default
   5505   __ push(scratch);
   5506   // isolate
   5507   __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
   5508   __ push(scratch);
   5509   // holder
   5510   __ push(holder);
   5511 
   5512   // Prepare arguments.
   5513   __ mr(scratch, sp);
   5514 
   5515   // Allocate the FunctionCallbackInfo structure in the arguments' space,
   5516   // since it is not controlled by the GC.
   5517   // PPC LINUX ABI:
   5518   //
   5519   // Create 5 extra slots on stack:
   5520   //    [0] space for DirectCEntryStub's LR save
   5521   //    [1-4] FunctionCallbackInfo
   5522   const int kApiStackSpace = 5;
   5523   const int kFunctionCallbackInfoOffset =
   5524       (kStackFrameExtraParamSlot + 1) * kPointerSize;
   5525 
   5526   FrameScope frame_scope(masm, StackFrame::MANUAL);
   5527   __ EnterExitFrame(false, kApiStackSpace);
   5528 
   5529   DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
   5530   // r3 = FunctionCallbackInfo&
   5531   // The FunctionCallbackInfo is located after the return address slot.
   5532   __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
   5533   // FunctionCallbackInfo::implicit_args_
   5534   __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
   5535   if (argc.is_immediate()) {
   5536     // FunctionCallbackInfo::values_
   5537     __ addi(ip, scratch,
   5538             Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
   5539     __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
   5540     // FunctionCallbackInfo::length_ = argc
   5541     __ li(ip, Operand(argc.immediate()));
   5542     __ stw(ip, MemOperand(r3, 2 * kPointerSize));
   5543     // FunctionCallbackInfo::is_construct_call_ = 0
   5544     __ li(ip, Operand::Zero());
   5545     __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
   5546   } else {
   5547     __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2));
   5548     __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
   5549     // FunctionCallbackInfo::values_
   5550     __ add(r0, scratch, ip);
   5551     __ StoreP(r0, MemOperand(r3, 1 * kPointerSize));
   5552     // FunctionCallbackInfo::length_ = argc
   5553     __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize));
   5554     // FunctionCallbackInfo::is_construct_call_
   5555     __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
   5556   }
   5557 
   5558   ExternalReference thunk_ref =
   5559       ExternalReference::invoke_function_callback(masm->isolate());
   5560 
   5561   AllowExternalCallThatCantCauseGC scope(masm);
   5562   MemOperand context_restore_operand(
   5563       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   5564   // Stores return the first JS argument.
   5565   int return_value_offset = 0;
   5566   if (return_first_arg) {
   5567     return_value_offset = 2 + FCA::kArgsLength;
   5568   } else {
   5569     return_value_offset = 2 + FCA::kReturnValueOffset;
   5570   }
   5571   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   5572   int stack_space = 0;
   5573   MemOperand is_construct_call_operand =
   5574       MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
   5575   MemOperand* stack_space_operand = &is_construct_call_operand;
   5576   if (argc.is_immediate()) {
   5577     stack_space = argc.immediate() + FCA::kArgsLength + 1;
   5578     stack_space_operand = NULL;
   5579   }
   5580   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
   5581                            stack_space_operand, return_value_operand,
   5582                            &context_restore_operand);
   5583 }
   5584 
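        // Illustrative sketch (not part of the stub): the four exit-frame slots written
        // above roughly populate the structure below (names as in the public v8 API of
        // this version).  When argc is passed in a register, the is_construct_call_
        // slot is reused to hold the byte distance to the first argument, which
        // CallApiFunctionAndReturn then uses as the stack unwind size.
        //
        //   struct FunctionCallbackInfo {
        //     Object** implicit_args_;  // old sp: the 7 FCA slots pushed above
        //     Object** values_;         // implicit_args_ + kArgsLength - 1 + argc,
        //                               // i.e. the address of the first JS argument
        //     int length_;              // argc
        //     int is_construct_call_;   // 0 when argc is an immediate
        //   };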
   5585 
   5586 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   5587   bool call_data_undefined = this->call_data_undefined();
   5588   CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
   5589                             call_data_undefined);
   5590 }
   5591 
   5592 
   5593 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
   5594   bool is_store = this->is_store();
   5595   int argc = this->argc();
   5596   bool call_data_undefined = this->call_data_undefined();
   5597   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
   5598                             call_data_undefined);
   5599 }
   5600 
   5601 
   5602 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   5603   // ----------- S t a t e -------------
   5604   //  -- sp[0]                  : name
   5605   //  -- sp[4 .. kArgsLength*4] : PropertyCallbackArguments object
   5606   //  -- ...
   5607   //  -- r5                     : api_function_address
   5608   // -----------------------------------
   5609 
   5610   Register api_function_address = ApiGetterDescriptor::function_address();
   5611   DCHECK(api_function_address.is(r5));
   5612 
   5613   __ mr(r3, sp);                               // r3 = Handle<Name>
   5614   __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = PCA
   5615 
   5616 // If ABI passes Handles (pointer-sized struct) in a register:
   5617 //
   5618 // Create 2 extra slots on stack:
   5619 //    [0] space for DirectCEntryStub's LR save
   5620 //    [1] AccessorInfo&
   5621 //
   5622 // Otherwise:
   5623 //
   5624 // Create 3 extra slots on stack:
   5625 //    [0] space for DirectCEntryStub's LR save
   5626 //    [1] copy of Handle (first arg)
   5627 //    [2] AccessorInfo&
   5628 #if ABI_PASSES_HANDLES_IN_REGS
   5629   const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
   5630   const int kApiStackSpace = 2;
   5631 #else
   5632   const int kArg0Slot = kStackFrameExtraParamSlot + 1;
   5633   const int kAccessorInfoSlot = kArg0Slot + 1;
   5634   const int kApiStackSpace = 3;
   5635 #endif
   5636 
   5637   FrameScope frame_scope(masm, StackFrame::MANUAL);
   5638   __ EnterExitFrame(false, kApiStackSpace);
   5639 
   5640 #if !ABI_PASSES_HANDLES_IN_REGS
   5641   // pass 1st arg by reference
   5642   __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
   5643   __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
   5644 #endif
   5645 
   5646   // Create an AccessorInfo instance on the stack above the exit frame, with
   5647   // r4 (internal::Object** args_) as its data.
   5648   __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
   5649   // r4 = AccessorInfo&
   5650   __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
   5651 
   5652   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
   5653 
   5654   ExternalReference thunk_ref =
   5655       ExternalReference::invoke_accessor_getter_callback(isolate());
   5656   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
   5657                            kStackUnwindSpace, NULL,
   5658                            MemOperand(fp, 6 * kPointerSize), NULL);
   5659 }
   5660 
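        // Illustrative sketch (not part of the stub): the callback eventually invoked
        // through the thunk has the standard v8 accessor getter signature below; r3
        // passes the Handle<Name> (a pointer to the name slot on the stack) and r4 the
        // AccessorInfo whose args_ array is the PropertyCallbackArguments block left on
        // the stack by the caller.
        //
        //   void GetterCallback(Local<Name> property,
        //                       const PropertyCallbackInfo<Value>& info);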
   5661 
   5662 #undef __
   5663 }  // namespace internal
   5664 }  // namespace v8
   5665 
   5666 #endif  // V8_TARGET_ARCH_PPC
   5667