// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
#include "src/s390/code-stubs-s390.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
  __ StoreP(r3, MemOperand(sp, r1));
  __ push(r3);
  __ push(r4);
  __ AddP(r2, r2, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r2.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}

void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done, fastpath_done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();

  // Immediate values for this stub fit in instructions, so it's safe to use ip.
  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  DoubleRegister double_scratch = kScratchDoubleReg;

  __ push(scratch);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += kPointerSize;

  if (!skip_fastpath()) {
    // Load double input.
    __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset));

    // Do fast-path convert from double to int.
    __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_S390X
                            scratch,
#endif
                            result_reg, d0);

// Test for overflow
#if V8_TARGET_ARCH_S390X
    __ TestIfInt32(result_reg, r0);
#else
    __ TestIfInt32(scratch, result_reg, r0);
#endif
    __ beq(&fastpath_done, Label::kNear);
  }

  __ Push(scratch_high, scratch_low);
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;

  __ LoadlW(scratch_high,
            MemOperand(input_reg, double_offset + Register::kExponentOffset));
  __ LoadlW(scratch_low,
            MemOperand(input_reg, double_offset + Register::kMantissaOffset));

  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
  // Load scratch with exponent - 1. This is faster than loading it with the
  // exponent, because Bias + 1 = 1024, which is an *S390* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
  // If the exponent is greater than or equal to 84, the 32 least significant
  // bits are zeros (the 52 mantissa bits then all land at or above bit 32),
  // so the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ CmpP(scratch, Operand(83));
  __ bge(&out_of_range, Label::kNear);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ Load(r0, Operand(51));
  __ SubP(scratch, r0, scratch);
  __ CmpP(scratch, Operand::Zero());
  __ ble(&only_low, Label::kNear);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ ShiftRight(scratch_low, scratch_low, scratch);
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ Load(r0, Operand(32));
  __ SubP(scratch, r0, scratch);
  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
  // Set the implicit 1 before the mantissa part in scratch_high.
  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
  __ Load(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
  __ ShiftLeftP(r0, r0, Operand(16));
  __ OrP(result_reg, result_reg, r0);
  __ ShiftLeft(r0, result_reg, scratch);
  __ OrP(result_reg, scratch_low, r0);
  __ b(&negate, Label::kNear);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done, Label::kNear);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ LoadComplementRR(scratch, scratch);
  __ ShiftLeft(result_reg, scratch_low, scratch);

  __ bind(&negate);
  // If the input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals 0.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
  __ ShiftRightArith(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
  __ lgfr(r0, r0);
  __ ShiftRightP(r0, r0, Operand(32));
#endif
  __ XorP(result_reg, r0);
  __ ShiftRight(r0, scratch_high, Operand(31));
  __ AddP(result_reg, r0);

  __ bind(&done);
  __ Pop(scratch_high, scratch_low);

  __ bind(&fastpath_done);
  __ pop(scratch);

  __ Ret();
}
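
// A minimal, portable sketch of the slow-path truncation implemented above,
// assuming IEEE-754 doubles; the names below are illustrative only and the
// block is kept out of the build. Note the final negation uses the
// two's-complement identity (x ^ 0xffffffff) + 1 == -x relied on by the stub.
#if 0
#include <cstdint>
#include <cstring>
static int32_t TruncateDoubleToInt32Sketch(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // Reinterpret the double's bits.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;    // |input| < 1 truncates to 0.
  if (exponent >= 84) return 0;  // Low 32 bits of the integer are all zero.
  // 52 mantissa bits plus the implicit leading 1.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t result = exponent > 52
                        ? static_cast<uint32_t>(mantissa << (exponent - 52))
                        : static_cast<uint32_t>(mantissa >> (52 - exponent));
  if (bits >> 63) result = ~result + 1;  // Negate if the sign bit was set.
  return static_cast<int32_t>(result);
}
#endif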

// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ CmpP(r2, r3);
  __ bne(&not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second-best thing - test it ourselves.
  // The operands are identical and they are not both Smis, so neither of
  // them is a Smi.  If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    // Call runtime on identical JSObjects.
    __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
    __ bge(slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ CmpP(r6, Operand(SYMBOL_TYPE));
    __ beq(slow);
  } else {
    __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
    __ beq(&heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ CmpP(r6, Operand(FIRST_JS_RECEIVER_TYPE));
      __ bge(slow);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ CmpP(r6, Operand(SYMBOL_TYPE));
      __ beq(slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ CmpP(r6, Operand(ODDBALL_TYPE));
        __ bne(&return_equal);
        __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
        __ bne(&return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ LoadImmP(r2, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ LoadImmP(r2, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ LoadImmP(r2, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ LoadImmP(r2, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ LoadImmP(r2, Operand(EQUAL));  // Things are <=, >=, ==, === themselves
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ LoadlW(r4, FieldMemOperand(r2, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
    __ ExtractBitMask(r5, r4, HeapNumber::kExponentMask);
    __ CmpLogicalP(r5, Operand(0x7ff));
    __ bne(&return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(r4, Operand(HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ LoadlW(r5, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
    __ OrP(r2, r5, r4);
    __ CmpP(r2, Operand::Zero());
    // For equal we already have the right value in r2:  Return zero (equal)
    // if all bits in the mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load r2 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      Label not_equal;
      __ bne(&not_equal, Label::kNear);
      // All-zero means Infinity means equal.
      __ Ret();
      __ bind(&not_equal);
      if (cond == le) {
        __ LoadImmP(r2, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ LoadImmP(r2, Operand(LESS));  // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}
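
// A small sketch of the NaN test performed above on the heap number's bit
// pattern, assuming IEEE-754 doubles; illustrative only and kept out of the
// build. A NaN has all eleven exponent bits set and a non-zero mantissa (an
// all-zero mantissa would be an Infinity instead).
#if 0
#include <cstdint>
#include <cstring>
static bool IsNaNSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  const bool exponent_all_set = ((bits >> 52) & 0x7FF) == 0x7FF;
  const bool mantissa_nonzero = (bits & ((uint64_t{1} << 52) - 1)) != 0;
  return exponent_all_set && mantissa_nonzero;
}
#endif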

// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs, Label* lhs_not_nan,
                                    Label* slow, bool strict) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi.  Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r5, r6, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r2 then there is already a non-zero value in it.
    Label skip;
    __ beq(&skip, Label::kNear);
    if (!rhs.is(r2)) {
      __ mov(r2, Operand(NOT_EQUAL));
    }
    __ Ret();
    __ bind(&skip);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    // the runtime.
    __ bne(slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r2, to d6.
  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ b(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r6, r6, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r2 then there is already a non-zero value in it.
    Label skip;
    __ beq(&skip, Label::kNear);
    if (!lhs.is(r2)) {
      __ mov(r2, Operand(NOT_EQUAL));
    }
    __ Ret();
    __ bind(&skip);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number.  Call
    // the runtime.
    __ bne(slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r3, to d7.
  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}

// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r4 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ CompareObjectType(rhs, r4, r4, FIRST_JS_RECEIVER_TYPE);
  __ blt(&first_non_object, Label::kNear);

  // Return non-zero (r2 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ CmpP(r4, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  __ CompareObjectType(lhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ bge(&return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ CmpP(r5, Operand(ODDBALL_TYPE));
  __ beq(&return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ OrP(r4, r4, r5);
  __ AndP(r0, r4, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ beq(&return_not_equal);
}
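
// A sketch of the tag trick used at the end of the function above,
// illustrative only and kept out of the build: because kInternalizedTag == 0
// and kStringTag == 0, an instance type denotes an internalized string iff
// (type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0, so OR-ing the
// two instance types first tests both operands with a single mask.
#if 0
#include <cstdint>
static bool BothInternalizedStringsSketch(uint32_t lhs_type, uint32_t rhs_type,
                                          uint32_t is_not_string_mask,
                                          uint32_t is_not_internalized_mask) {
  // A set bit from either operand survives the OR and fails the mask test.
  return ((lhs_type | rhs_type) &
          (is_not_string_mask | is_not_internalized_mask)) == 0;
}
#endif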

// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers, Label* slow) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  __ CompareObjectType(rhs, r5, r4, HEAP_NUMBER_TYPE);
  __ bne(not_heap_numbers);
  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ CmpP(r4, r5);
  __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.

  // Both are heap numbers.  Load them up then jump to the code we have
  // for that.
  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  __ b(both_loaded_as_doubles);
}

// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));

  // r4 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ mov(r0, Operand(kIsNotStringMask));
  __ AndP(r0, r4);
  __ bne(&object_test, Label::kNear);
  __ mov(r0, Operand(kIsNotInternalizedMask));
  __ AndP(r0, r4);
  __ bne(possible_strings);
  __ CompareObjectType(lhs, r5, r5, FIRST_NONSTRING_TYPE);
  __ bge(runtime_call);
  __ mov(r0, Operand(kIsNotInternalizedMask));
  __ AndP(r0, r5);
  __ bne(possible_strings);

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in r2.
  __ Ret();

  __ bind(&object_test);
  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ LoadP(r5, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ LoadlB(r6, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ LoadlB(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
  __ AndP(r0, r6, Operand(1 << Map::kIsUndetectable));
  __ bne(&undetectable);
  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ bne(&return_unequal);

  __ CompareInstanceType(r4, r4, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);
  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
  __ blt(runtime_call);

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in r2.
  __ Ret();

  __ bind(&undetectable);
  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
  __ beq(&return_unequal);

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ CompareInstanceType(r4, r4, ODDBALL_TYPE);
  __ beq(&return_equal);
  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
  __ bne(&return_unequal);

  __ bind(&return_equal);
  __ LoadImmP(r2, Operand(EQUAL));
  __ Ret();
}

static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}

// On entry r3 and r2 are the values to be compared.
// On exit r2 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r3;
  Register rhs = r2;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, r4, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, r5, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis, smi_done;
  __ OrP(r4, r3, r2);
  __ JumpIfNotSmi(r4, &not_two_smis);
  __ SmiUntag(r3);
  __ SmiUntag(r2);
  __ SubP(r2, r3, r2);
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ AndP(r4, lhs, rhs);
  __ JumpIfNotSmi(r4, &not_smis);
  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison.  The double values of the numbers have been loaded
  // into d7 and d6.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7.
  __ bind(&lhs_not_nan);
  Label no_nan;
  __ cdbr(d7, d6);

  Label nan, equal, less_than;
  __ bunordered(&nan);
  __ beq(&equal, Label::kNear);
  __ blt(&less_than, Label::kNear);
  __ LoadImmP(r2, Operand(GREATER));
  __ Ret();
  __ bind(&equal);
  __ LoadImmP(r2, Operand(EQUAL));
  __ Ret();
  __ bind(&less_than);
  __ LoadImmP(r2, Operand(LESS));
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the comparison is unordered.  Load r2
  // with whatever it takes to make the comparison fail, since comparisons with
  // NaN always fail.
  if (cc == lt || cc == le) {
    __ LoadImmP(r2, Operand(GREATER));
  } else {
    __ LoadImmP(r2, Operand(LESS));
  }
    581 
    582   __ bind(&not_smis);
    583   // At this point we know we are dealing with two different objects,
    584   // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
    585   if (strict()) {
    586     // This returns non-equal for some object types, or falls through if it
    587     // was not lucky.
    588     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
    589   }
    590 
  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison.  Can jump to slow case,
  // or load both doubles into d6 and d7 and jump to the code that handles
  // that case.  If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r4 will contain the type of rhs_.  Never falls through.
  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case, EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r4 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
                                             &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r4, r5, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
                      r5);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r4, r5);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r4, r5, r6);
  }
  // Never falls through to here.

  __ bind(&slow);

  if (cc == eq) {
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(cp);
      __ Call(strict() ? isolate()->builtins()->StrictEqual()
                       : isolate()->builtins()->Equal(),
              RelocInfo::CODE_TARGET);
      __ Pop(cp);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
    __ SubP(r2, r2, r3);
    __ Ret();
  } else {
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ LoadSmiLiteral(r2, Smi::FromInt(ncr));
    __ push(r2);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}
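
// A sketch of the result contract used throughout the compare stub above,
// illustrative only and kept out of the build: the stub leaves a small
// integer in r2 and the caller re-applies the original condition to it, so
// for NaN operands the stub returns whichever value makes that condition
// test fail (GREATER for lt/le, LESS for gt/ge).
#if 0
static int CompareResultSketch(double lhs, double rhs, bool cond_is_lt_or_le) {
  if (lhs != lhs || rhs != rhs) {  // NaN compares unordered to everything.
    return cond_is_lt_or_le ? 1 /* GREATER */ : -1 /* LESS */;
  }
  return lhs < rhs ? -1 /* LESS */ : (lhs > rhs ? 1 /* GREATER */ : 0);
}
#endif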

void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | r14.bit());
  if (save_doubles()) {
    __ MultiPushDoubles(kCallerSavedDoubles);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = r3;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
                   argument_count);
  if (save_doubles()) {
    __ MultiPopDoubles(kCallerSavedDoubles);
  }
  __ MultiPop(kJSCallerSaved | r14.bit());
  __ Ret();
}

void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PushSafepointRegisters();
  __ b(r14);
}

void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ PopSafepointRegisters();
  __ b(r14);
}

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r4));
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
  const DoubleRegister double_scratch = d0;
  const Register scratch = r1;
  const Register scratch2 = r9;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ LoadDouble(double_exponent,
                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    // Detect integer exponents stored as double.
    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
                             double_scratch);
    __ beq(&int_exponent, Label::kNear);

    __ push(r14);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 0, 2);
    }
    __ pop(r14);
    __ MovFromFloatResult(double_result);
    __ b(&done);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ LoadRR(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ LoadRR(exponent, scratch);
  }
  __ ldr(double_scratch, double_base);  // Back up base.
  __ LoadImmP(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ CmpP(scratch, Operand::Zero());
  __ bge(&positive_exponent, Label::kNear);
  __ LoadComplementRR(scratch, scratch);
  __ bind(&positive_exponent);

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);
  __ mov(scratch2, Operand(1));
  __ AndP(scratch2, scratch);
  __ beq(&no_carry, Label::kNear);
  __ mdbr(double_result, double_scratch);
  __ bind(&no_carry);
  __ ShiftRightP(scratch, scratch, Operand(1));
  __ LoadAndTestP(scratch, scratch);
  __ beq(&loop_end, Label::kNear);
  __ mdbr(double_scratch, double_scratch);
  __ b(&while_true);
  __ bind(&loop_end);

  __ CmpP(exponent, Operand::Zero());
  __ bge(&done);

  // Get 1/double_result:
  __ ldr(double_scratch, double_result);
  __ LoadImmP(scratch2, Operand(1));
  __ ConvertIntToDouble(scratch2, double_result);
  __ ddbr(double_result, double_scratch);

  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ lzdr(kDoubleRegZero);
  __ cdbr(double_result, kDoubleRegZero);
  __ bne(&done, Label::kNear);
  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with the exponent value before bailing out.
  __ ConvertIntToDouble(exponent, double_exponent);

  // Returning or bailing out.
  __ push(r14);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 0, 2);
  }
  __ pop(r14);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}
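
// A sketch of the square-and-multiply loop generated above for integer
// exponents, illustrative only and kept out of the build. Like the stub, it
// iterates over the bits of the exponent and computes a negative power as the
// reciprocal of the positive one, which is why the stub still bails out to
// the C library pow() when the result could be subnormal.
#if 0
static double PowIntSketch(double base, int exponent) {
  double result = 1.0;
  double running_square = base;
  long long e = exponent;  // Widen so negating INT_MIN is well defined.
  const bool negative = e < 0;
  if (negative) e = -e;
  while (e != 0) {
    if (e & 1) result *= running_square;  // Fold in the current bit.
    e >>= 1;
    if (e != 0) running_square *= running_square;  // Square for the next bit.
  }
  return negative ? 1.0 / result : result;
}
#endif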

bool CEntryStub::NeedsImmovableCode() { return true; }

void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}

void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}

void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}

void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
}

void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}

void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r2: number of arguments including receiver
  // r3: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // r4: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  __ LoadRR(r7, r3);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ LoadRR(r3, r4);
  } else {
    // Compute the argv pointer.
    __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
    __ lay(r3, MemOperand(r3, sp, -kPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);

  // Need at least one extra slot for return address location.
  int arg_stack_space = 1;

  // Pass buffer for return value on stack if necessary
  bool needs_return_buffer =
      result_size() > 2 ||
      (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS);
  if (needs_return_buffer) {
    arg_stack_space += result_size();
  }

#if V8_TARGET_ARCH_S390X
  // 64-bit Linux passes the Arguments object by reference, not by value.
  arg_stack_space += 2;
#endif

  __ EnterExitFrame(save_doubles(), arg_stack_space, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // Store a copy of argc, argv in callee-saved registers for later.
  __ LoadRR(r6, r2);
  __ LoadRR(r8, r3);
  // r2, r6: number of arguments including receiver  (C callee-saved)
  // r3, r8: pointer to the first argument
  // r7: pointer to builtin function  (C callee-saved)

  // Result returned in registers or stack, depending on result size and ABI.

  Register isolate_reg = r4;
  if (needs_return_buffer) {
    // The return value is a 16-byte non-scalar value.
    // Use the frame storage reserved by the calling function to pass the
    // return buffer as an implicit first argument in r2.  Shift the original
    // parameters by one register each.
    __ LoadRR(r4, r3);
    __ LoadRR(r3, r2);
    __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
    isolate_reg = r5;
  }
  // Call C built-in.
  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));

  Register target = r7;

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  {
    Label return_label;
    __ larl(r14, &return_label);  // Generate the return addr of call later.
    __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));

    // zLinux ABI requires caller's frame to have sufficient space for the
    // callee-preserved register save area.
    // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
    __ b(target);
    __ bind(&return_label);
    // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
  }

  // If return value is on the stack, pop it to registers.
  if (needs_return_buffer) {
    if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize));
    __ LoadP(r3, MemOperand(r2, kPointerSize));
    __ LoadP(r2, MemOperand(r2));
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r2, Heap::kExceptionRootIndex);
  __ beq(&exception_returned, Label::kNear);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ mov(r1, Operand(pending_exception_address));
    __ LoadP(r1, MemOperand(r1));
    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
    // Cannot use Check here, as it attempts to generate a call into the runtime.
    __ beq(&okay, Label::kNear);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r2:r3: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // r6: still holds argc (callee-saved).
    argc = r6;
  }
  __ LeaveExitFrame(save_doubles(), argc, true);
  __ b(r14);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set r3 to
  // contain the current pending exception; don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r2);
    __ LoadImmP(r2, Operand::Zero());
    __ LoadImmP(r3, Operand::Zero());
    __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(cp, Operand(pending_handler_context_address));
  __ LoadP(cp, MemOperand(cp));
  __ mov(sp, Operand(pending_handler_sp_address));
  __ LoadP(sp, MemOperand(sp));
  __ mov(fp, Operand(pending_handler_fp_address));
  __ LoadP(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label skip;
  __ CmpP(cp, Operand::Zero());
  __ beq(&skip, Label::kNear);
  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

  // Compute the handler entry address and jump to it.
  __ mov(r3, Operand(pending_handler_code_address));
  __ LoadP(r3, MemOperand(r3));
  __ mov(r4, Operand(pending_handler_offset_address));
  __ LoadP(r4, MemOperand(r4));
  __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
  __ AddP(ip, r3, r4);
  __ Jump(ip);
}
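
// A sketch of the hidden return buffer convention handled above, illustrative
// only and kept out of the build: when a C function returns an aggregate that
// does not fit in the ABI's return registers, the caller reserves a buffer
// and passes its address as an implicit first argument, shifting the visible
// arguments by one register (here, from r2/r3 to r3/r4).
#if 0
struct ThreeWords { void* a; void* b; void* c; };  // Hypothetical result type.
// Declared:           ThreeWords Builtin(int argc, void** argv);
// Effectively called: void Builtin(ThreeWords* ret_buf, int argc, void** argv);
#endif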

void JSEntryStub::Generate(MacroAssembler* masm) {
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

// Save the floating point registers.
#if V8_TARGET_ARCH_S390X
  // The 64-bit ABI requires f8 through f15 to be saved.
  __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
  __ std(d8, MemOperand(sp));
  __ std(d9, MemOperand(sp, 1 * kDoubleSize));
  __ std(d10, MemOperand(sp, 2 * kDoubleSize));
  __ std(d11, MemOperand(sp, 3 * kDoubleSize));
  __ std(d12, MemOperand(sp, 4 * kDoubleSize));
  __ std(d13, MemOperand(sp, 5 * kDoubleSize));
  __ std(d14, MemOperand(sp, 6 * kDoubleSize));
  __ std(d15, MemOperand(sp, 7 * kDoubleSize));
#else
  // The 31-bit ABI requires f4 and f6 to be saved:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
  __ std(d4, MemOperand(sp));
  __ std(d6, MemOperand(sp, kDoubleSize));
#endif

  // zLinux ABI
  //    Incoming parameters:
  //          r2: code entry
  //          r3: function
  //          r4: receiver
  //          r5: argc
  //          r6: argv
  //    Requires us to save the callee-preserved registers r6-r13.
  //    The general convention is to also save r14 (return addr) and
  //    sp/r15 in a single STM/STMG.
  __ lay(sp, MemOperand(sp, -10 * kPointerSize));
  __ StoreMultipleP(r6, sp, MemOperand(sp, 0));

  // Set up the reserved register for 0.0.
  // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);

  // Push a frame with special values set up to mark it as an entry frame.
  //   Bad FP (-1)
  //   SMI Marker
  //   SMI Marker
  //   kCEntryFPAddress
  //   Frame type
  __ lay(sp, MemOperand(sp, -5 * kPointerSize));
  // Push a bad frame pointer to fail if it is used.
  __ LoadImmP(r10, Operand(-1));

  StackFrame::Type marker = type();
  __ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
  __ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
  // Save copies of the top frame descriptor on the stack.
  __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ LoadP(r7, MemOperand(r7));
  __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
  // Set up frame pointer for the frame to be pushed.
  // Need to add kPointerSize, because sp already has one extra
  // slot for the frame type being pushed later.
  __ lay(fp,
         MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
  __ LoadAndTestP(r8, MemOperand(r7));
  __ bne(&non_outermost_js, Label::kNear);
  __ StoreP(fp, MemOperand(r7));
  __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont, Label::kNear);
  __ bind(&non_outermost_js);
  __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));

  __ bind(&cont);
  __ StoreP(ip, MemOperand(sp));  // frame-type

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ b(&invoke, Label::kNear);

  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));

  __ StoreP(r2, MemOperand(ip));
  __ LoadRoot(r2, Heap::kExceptionRootIndex);
  __ b(&exit, Label::kNear);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r2-r6.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the b(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r2: code entry
  // r3: function
  // r4: receiver
  // r5: argc
  // r6: argv
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ LoadP(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline.
  // The address points to the start of the code object; skip the header.
  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
  Label return_addr;
  // __ basr(r14, ip);
  __ larl(r14, &return_addr);
  __ b(ip);
  __ bind(&return_addr);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r2 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r7);
  __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ bne(&non_outermost_js_2, Label::kNear);
  __ mov(r8, Operand::Zero());
  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
  __ StoreP(r8, MemOperand(r7));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r5);
  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ StoreP(r5, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));

  // Reload callee-saved preserved regs, return address reg (r14) and sp
  __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
  __ la(sp, MemOperand(sp, 10 * kPointerSize));

// Restore the floating point registers.
#if V8_TARGET_ARCH_S390X
  // The 64-bit ABI requires f8 through f15 to be restored.
  __ ld(d8, MemOperand(sp));
  __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
  __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
  __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
  __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
  __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
  __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
  __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
  __ la(sp, MemOperand(sp, 8 * kDoubleSize));
#else
  // The 31-bit ABI requires f4 and f6 to be restored:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ ld(d4, MemOperand(sp));
  __ ld(d6, MemOperand(sp, kDoubleSize));
  __ la(sp, MemOperand(sp, 2 * kDoubleSize));
#endif

  __ b(r14);
}
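
// A sketch of the outermost-entry bookkeeping performed above, illustrative
// only and kept out of the build: the isolate keeps a single js_entry_sp
// slot, the first (outermost) entry records its frame pointer there and tags
// its frame OUTERMOST_JSENTRY_FRAME, and only that entry clears the slot on
// the way out; nested entries leave it untouched.
#if 0
static void* js_entry_sp_slot = nullptr;  // Stand-in for Isolate::js_entry_sp.
static bool EnterJSSketch(void* fp) {
  if (js_entry_sp_slot == nullptr) {
    js_entry_sp_slot = fp;  // Outermost entry: remember the frame.
    return true;            // OUTERMOST_JSENTRY_FRAME
  }
  return false;             // INNER_JSENTRY_FRAME
}
static void ExitJSSketch(bool outermost) {
  if (outermost) js_entry_sp_slot = nullptr;  // Only the outermost clears it.
}
#endif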

void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to the runtime if native RegExp is not selected at
// compile time, or if the regexp entry in generated code has been turned off
// by the runtime switch.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else   // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]: last_match_info (expected JSArray)
  //  sp[4]: previous index
  //  sp[8]: subject string
  //  sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime, br_over, encoding_type_UC16;

  // Allocation of registers for this function. These are in callee-save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the contents of these registers are safe to use after the call.
  Register subject = r6;
  Register regexp_data = r7;
  Register last_match_info_elements = r8;
  Register code = r9;

  __ CleanseP(r14);

  // Ensure register assignments are consistent with callee-save masks.
  DCHECK(subject.bit() & kCalleeSaved);
  DCHECK(regexp_data.bit() & kCalleeSaved);
  DCHECK(last_match_info_elements.bit() & kCalleeSaved);
  DCHECK(code.bit() & kCalleeSaved);

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
  __ LoadAndTestP(r2, MemOperand(r2));
  __ beq(&runtime);

  // Check that the first argument is a JSRegExp object.
  __ LoadP(r2, MemOperand(sp, kJSRegExpOffset));
  __ JumpIfSmi(r2, &runtime);
  __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
  __ bne(&runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ LoadP(regexp_data, FieldMemOperand(r2, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ TestIfSmi(regexp_data);
    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
    __ CompareObjectType(regexp_data, r2, r2, FIXED_ARRAY_TYPE);
    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ LoadP(r2, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
  __ CmpSmiLiteral(r2, Smi::FromInt(JSRegExp::IRREGEXP), r0);
  __ bne(&runtime);
   1286   // regexp_data: RegExp data (FixedArray)
   1287   // Check that the number of captures fits in the static offsets vector buffer.
   1288   __ LoadP(r4,
   1289            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   1290   // Check (number_of_captures + 1) * 2 <= offsets vector size,
   1291   // or equivalently: number_of_captures * 2 <= offsets vector size - 2.
   1292   // SmiToShortArrayOffset performs both the SmiUntag (a nop on 32-bit) and
   1293   // the multiplication by 2.
   1294   __ SmiToShortArrayOffset(r4, r4);
   1295   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
   1296   __ CmpLogicalP(r4, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
   1297   __ bgt(&runtime);
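          // Worked example with illustrative sizes only: if the offsets vector held
          // 8 entries, up to 3 capture groups would pass this check, since
          // (3 + 1) * 2 == 8, i.e. 3 * 2 <= 8 - 2.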
   1298 
   1299   // Reset offset for possibly sliced string.
   1300   __ LoadImmP(ip, Operand::Zero());
   1301   __ LoadP(subject, MemOperand(sp, kSubjectOffset));
   1302   __ JumpIfSmi(subject, &runtime);
   1303   __ LoadRR(r5, subject);  // Make a copy of the original subject string.
   1304   // subject: subject string
   1305   // r5: subject string
   1306   // regexp_data: RegExp data (FixedArray)
   1307   // Handle subject string according to its encoding and representation:
   1308   // (1) Sequential string?  If yes, go to (4).
   1309   // (2) Sequential or cons?  If not, go to (5).
   1310   // (3) Cons string.  If the string is flat, replace subject with first string
   1311   //     and go to (1). Otherwise bail out to runtime.
   1312   // (4) Sequential string.  Load regexp code according to encoding.
   1313   // (E) Carry on.
   1314   /// [...]
   1315 
   1316   // Deferred code at the end of the stub:
   1317   // (5) Long external string?  If not, go to (7).
   1318   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1319   //     Go to (4).
   1320   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1321   // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
   1322 
   1323   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
   1324       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
   1325 
   1326   __ bind(&check_underlying);
   1327   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
   1328   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
   1329 
   1330   // (1) Sequential string?  If yes, go to (4).
   1331 
   1332   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
   1333                  kShortExternalStringMask) == 0xa7);
   1334   __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
   1335                      kShortExternalStringMask));
   1336   __ AndP(r3, r2);
   1337   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   1338   __ beq(&seq_string, Label::kNear);  // Go to (4).
   1339 
   1340   // (2) Sequential or cons? If not, go to (5).
   1341   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   1342   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   1343   STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   1344   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   1345   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   1346   STATIC_ASSERT(kExternalStringTag < 0xffffu);
   1347   __ CmpP(r3, Operand(kExternalStringTag));
   1348   __ bge(&not_seq_nor_cons);  // Go to (5).
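          // Per the asserts above, every representation other than sequential and
          // cons (external, sliced, thin, short external, and non-strings) compares
          // greater than or equal to kExternalStringTag, so this one compare routes
          // them all to the deferred cases.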
   1349 
   1350   // (3) Cons string.  Check that it's flat.
   1351   // Replace subject with first string and reload instance type.
   1352   __ LoadP(r2, FieldMemOperand(subject, ConsString::kSecondOffset));
   1353   __ CompareRoot(r2, Heap::kempty_stringRootIndex);
   1354   __ bne(&runtime);
   1355   __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   1356   __ b(&check_underlying);
   1357 
   1358   // (4) Sequential string.  Load regexp code according to encoding.
   1359   __ bind(&seq_string);
   1360   // subject: sequential subject string (or look-alike, external string)
   1361   // r5: original subject string
   1362   // Load previous index and check range before r5 is overwritten.  We have to
   1363   // use r5 instead of subject here because subject might have been only made
   1364   // to look like a sequential string when it actually is an external string.
   1365   __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset));
   1366   __ JumpIfNotSmi(r3, &runtime);
   1367   __ LoadP(r5, FieldMemOperand(r5, String::kLengthOffset));
   1368   __ CmpLogicalP(r5, r3);
   1369   __ ble(&runtime);
   1370   __ SmiUntag(r3);
   1371 
   1372   STATIC_ASSERT(8 == kOneByteStringTag);
   1373   STATIC_ASSERT(kTwoByteStringTag == 0);
   1374   STATIC_ASSERT(kStringEncodingMask == 8);
   1375   __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
   1376   __ beq(&encoding_type_UC16, Label::kNear);
   1377   __ LoadP(code,
   1378            FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
   1379   __ b(&br_over, Label::kNear);
   1380   __ bind(&encoding_type_UC16);
   1381   __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   1382   __ bind(&br_over);
   1383 
   1384   // (E) Carry on.  String handling is done.
   1385   // code: irregexp code
   1386   // Check that the irregexp code has been generated for the actual string
   1387   // encoding. If it has, the field contains a code object; otherwise it
   1388   // contains a smi (code flushing support).
   1389   __ JumpIfSmi(code, &runtime);
   1390 
   1391   // r3: previous index
   1392   // r5: encoding of subject string (1 if one_byte, 0 if two_byte);
   1393   // code: Address of generated regexp code
   1394   // subject: Subject string
   1395   // regexp_data: RegExp data (FixedArray)
   1396   // All checks done. Now push arguments for native regexp code.
   1397   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r2, r4);
   1398 
   1399   // Isolates: note we add an additional parameter here (isolate pointer).
   1400   const int kRegExpExecuteArguments = 10;
   1401   const int kParameterRegisters = 5;
   1402   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
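          // Per the s390 C ABI (which kParameterRegisters reflects), the first five
          // integer arguments travel in r2-r6; this reserves the five stack slots
          // needed for arguments 6 through 10, which are stored below.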
   1403 
   1404   // Stack pointer now points to cell where return address is to be written.
   1405   // Arguments are before that on the stack or in registers.
   1406 
   1407   // Argument 10 (in stack parameter area): Pass current isolate address.
   1408   __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
   1409   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
   1410                                    4 * kPointerSize));
   1411 
   1412   // Argument 9 is a dummy that reserves the space used for
   1413   // the return address added by the ExitFrame in native calls.
   1414   __ mov(r2, Operand::Zero());
   1415   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
   1416                                    3 * kPointerSize));
   1417 
   1418   // Argument 8: Indicate that this is a direct call from JavaScript.
   1419   __ mov(r2, Operand(1));
   1420   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
   1421                                    2 * kPointerSize));
   1422 
   1423   // Argument 7: Start (high end) of backtracking stack memory area.
   1424   __ mov(r2, Operand(address_of_regexp_stack_memory_address));
   1425   __ LoadP(r2, MemOperand(r2, 0));
   1426   __ mov(r1, Operand(address_of_regexp_stack_memory_size));
   1427   __ LoadP(r1, MemOperand(r1, 0));
   1428   __ AddP(r2, r1);
   1429   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
   1430                                    1 * kPointerSize));
   1431 
   1432   // Argument 6: Set the number of capture registers to zero to force
   1433   // global regexps to behave as non-global.  This does not affect non-global
   1434   // regexps.
   1435   __ mov(r2, Operand::Zero());
   1436   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
   1437                                    0 * kPointerSize));
   1438 
   1439   // Argument 1 (r2): Subject string.
   1440   // Load the original subject string (whose length is read below for
   1441   // argument 4) from the previous stack frame. We have to go through fp,
   1442   // which points exactly 15 pointer sizes below the previous sp: creating the
   1443   // new stack frame pushed the previous fp and moved sp up by 2 * kPointerSize,
   1444   // on top of the 13 registers saved on the stack before that.
   1445   __ LoadP(r2, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
   1446 
   1447   // Argument 2 (r3): Previous index.
   1448   // Already there
   1449   __ AddP(r1, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
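          // r1 now holds the untagged address of the subject's first character.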
   1450 
   1451   // Argument 5 (r6): static offsets vector buffer.
   1452   __ mov(
   1453       r6,
   1454       Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
   1455 
   1456   // For arguments 4 (r5) and 3 (r4): get the string length, compute the start
   1457   // of the data, and the index shift (0 for one-byte, 1 for two-byte).
   1458   __ XorP(r5, Operand(1));
   1459   // If slice offset is not 0, load the length from the original sliced string.
   1460   // Argument 3, r4: Start of string data
   1461   // Prepare start and end index of the input.
   1462   __ ShiftLeftP(ip, ip, r5);
   1463   __ AddP(ip, r1, ip);
   1464   __ ShiftLeftP(r4, r3, r5);
   1465   __ AddP(r4, ip, r4);
   1466 
   1467   // Argument 4, r5: End of string data
   1468   __ LoadP(r1, FieldMemOperand(r2, String::kLengthOffset));
   1469   __ SmiUntag(r1);
   1470   __ ShiftLeftP(r0, r1, r5);
   1471   __ AddP(r5, ip, r0);
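          // In effect, with shift = 0 for one-byte and 1 for two-byte subjects:
          //   start (r4) = chars + ((slice_offset + previous_index) << shift)
          //   end   (r5) = chars + ((slice_offset + original_length) << shift)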
   1472 
   1473   // Locate the code entry and call it.
   1474   __ AddP(code, Operand(Code::kHeaderSize - kHeapObjectTag));
   1475 
   1476   DirectCEntryStub stub(isolate());
   1477   stub.GenerateCall(masm, code);
   1478 
   1479   __ LeaveExitFrame(false, no_reg, true);
   1480 
   1481   // r2: result (int32)
   1482   // subject: subject string -- needed to reload
   1483   __ LoadP(subject, MemOperand(sp, kSubjectOffset));
   1484 
   1485   // regexp_data: RegExp data (callee saved)
   1486   // last_match_info_elements: Last match info elements (callee saved)
   1487   // Check the result.
   1488   Label success;
   1489   __ Cmp32(r2, Operand(1));
   1490   // We expect exactly one result since we force the called regexp to behave
   1491   // as non-global.
   1492   __ beq(&success);
   1493   Label failure;
   1494   __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::FAILURE));
   1495   __ beq(&failure);
   1496   __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   1497   // If not exception it can only be retry. Handle that in the runtime system.
   1498   __ bne(&runtime);
   1499   // Result must now be exception. If there is no pending exception, a stack
   1500   // overflow (on the backtrack stack) was detected in RegExp code, but the
   1501   // exception has not been created yet. Handle that in the runtime system.
   1502   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   1503   __ mov(r3, Operand(isolate()->factory()->the_hole_value()));
   1504   __ mov(r4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
   1505                                        isolate())));
   1506   __ LoadP(r2, MemOperand(r4, 0));
   1507   __ CmpP(r2, r3);
   1508   __ beq(&runtime);
   1509 
   1510   // For exception, throw the exception again.
   1511   __ TailCallRuntime(Runtime::kRegExpExecReThrow);
   1512 
   1513   __ bind(&failure);
   1514   // For failure, return null (the exception case was handled above).
   1515   __ mov(r2, Operand(isolate()->factory()->null_value()));
   1516   __ la(sp, MemOperand(sp, (4 * kPointerSize)));
   1517   __ Ret();
   1518 
   1519   // Process the result from the native regexp code.
   1520   __ bind(&success);
   1521   __ LoadP(r3,
   1522            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   1523   // Calculate number of capture registers (number_of_captures + 1) * 2.
   1524   // SmiToShortArrayOffset accomplishes the multiplication by 2 and
   1525   // SmiUntag (which is a nop for 32-bit).
   1526   __ SmiToShortArrayOffset(r3, r3);
   1527   __ AddP(r3, Operand(2));
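          // For example, a regexp with two capture groups, such as /(a)(b)/, needs
          // (2 + 1) * 2 == 6 capture registers: a start/end offset pair for the
          // whole match plus one pair per group.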
   1528 
   1529   // Check that the last match info is a FixedArray.
   1530   __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
   1531   __ JumpIfSmi(last_match_info_elements, &runtime);
   1532   // Check that the object has fast elements.
   1533   __ LoadP(r2,
   1534            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   1535   __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
   1536   __ bne(&runtime);
   1537   // Check that the last match info has space for the capture registers and the
   1538   // additional information.
   1539   __ LoadP(
   1540       r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
   1541   __ AddP(r4, r3, Operand(RegExpMatchInfo::kLastMatchOverhead));
   1542   __ SmiUntag(r0, r2);
   1543   __ CmpP(r4, r0);
   1544   __ bgt(&runtime);
   1545 
   1546   // r3: number of capture registers
   1547   // subject: subject string
   1548   // Store the capture count.
   1549   __ SmiTag(r4, r3);
   1550   __ StoreP(r4, FieldMemOperand(last_match_info_elements,
   1551                                 RegExpMatchInfo::kNumberOfCapturesOffset));
   1552   // Store last subject and last input.
   1553   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
   1554                                      RegExpMatchInfo::kLastSubjectOffset));
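          // RecordWriteField can clobber the value register passed to it, so stash
          // subject in r4 around each write barrier and restore it afterwards.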
   1555   __ LoadRR(r4, subject);
   1556   __ RecordWriteField(last_match_info_elements,
   1557                       RegExpMatchInfo::kLastSubjectOffset, subject, r9,
   1558                       kLRHasNotBeenSaved, kDontSaveFPRegs);
   1559   __ LoadRR(subject, r4);
   1560   __ StoreP(subject, FieldMemOperand(last_match_info_elements,
   1561                                      RegExpMatchInfo::kLastInputOffset));
   1562   __ RecordWriteField(last_match_info_elements,
   1563                       RegExpMatchInfo::kLastInputOffset, subject, r9,
   1564                       kLRHasNotBeenSaved, kDontSaveFPRegs);
   1565 
   1566   // Get the static offsets vector filled by the native regexp code.
   1567   ExternalReference address_of_static_offsets_vector =
   1568       ExternalReference::address_of_static_offsets_vector(isolate());
   1569   __ mov(r4, Operand(address_of_static_offsets_vector));
   1570 
   1571   // r3: number of capture registers
   1572   // r4: offsets vector
   1573   Label next_capture;
   1574   // The capture register counter starts at the number of capture registers
   1575   // and counts down to zero.
   1576   __ AddP(r2, last_match_info_elements,
   1577           Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
   1578                   kPointerSize));
   1579   __ AddP(r4, Operand(-kIntSize));  // Bias down so the loop can pre-increment.
   1580   __ bind(&next_capture);
   1581   // Read the value from the static offsets vector buffer.
   1582   __ ly(r5, MemOperand(r4, kIntSize));
   1583   __ lay(r4, MemOperand(r4, kIntSize));
   1584   // Store the smi value in the last match info.
   1585   __ SmiTag(r5);
   1586   __ StoreP(r5, MemOperand(r2, kPointerSize));
   1587   __ lay(r2, MemOperand(r2, kPointerSize));
   1588   __ BranchOnCount(r3, &next_capture);
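          // Roughly, in C terms (names illustrative):
          //   for (int j = 0; j < capture_register_count; j++)
          //     first_capture[j] = Smi::FromInt(offsets_vector[j]);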
   1589 
   1590   // Return last match info.
   1591   __ LoadRR(r2, last_match_info_elements);
   1592   __ la(sp, MemOperand(sp, (4 * kPointerSize)));
   1593   __ Ret();
   1594 
   1595   // Do the runtime call to execute the regexp.
   1596   __ bind(&runtime);
   1597   __ TailCallRuntime(Runtime::kRegExpExec);
   1598 
   1599   // Deferred code for string handling.
   1600   // (5) Long external string? If not, go to (7).
   1601   __ bind(&not_seq_nor_cons);
   1602   // Compare flags are still set from the representation check in (2).
   1603   __ bgt(&not_long_external, Label::kNear);  // Go to (7).
   1604 
   1605   // (6) External string.  Make it, offset-wise, look like a sequential string.
   1606   __ bind(&external_string);
   1607   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
   1608   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
   1609   if (FLAG_debug_code) {
   1610     // Assert that we do not have a cons or slice (indirect strings) here.
   1611     // Sequential strings have already been ruled out.
   1612     STATIC_ASSERT(kIsIndirectStringMask == 1);
   1613     __ tmll(r2, Operand(kIsIndirectStringMask));
   1614     __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
   1615   }
   1616   __ LoadP(subject,
   1617            FieldMemOperand(subject, ExternalString::kResourceDataOffset));
   1618   // Move the pointer so that offset-wise, it looks like a sequential string.
   1619   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   1620   __ SubP(subject, subject,
   1621           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   1622   __ b(&seq_string);  // Go to (4).
   1623 
   1624   // (7) Short external string or not a string?  If yes, bail out to runtime.
   1625   __ bind(&not_long_external);
   1626   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   1627   __ mov(r0, Operand(kIsNotStringMask | kShortExternalStringMask));
   1628   __ AndP(r0, r3);
   1629   __ bne(&runtime);
   1630 
   1631   // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
   1632   Label thin_string;
   1633   __ CmpP(r3, Operand(kThinStringTag));
   1634   __ beq(&thin_string);
   1635   // Load offset into ip and replace subject string with parent.
   1636   __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   1637   __ SmiUntag(ip);
   1638   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   1639   __ b(&check_underlying);  // Go to (1).
   1640 
   1641   __ bind(&thin_string);
   1642   __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
   1643   __ b(&check_underlying);  // Go to (1).
   1644 #endif  // V8_INTERPRETED_REGEXP
   1645 }
   1646 
   1647 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
   1648   // r2 : number of arguments to the construct function
   1649   // r3 : the function to call
   1650   // r4 : feedback vector
   1651   // r5 : slot in feedback vector (Smi)
   1652   FrameScope scope(masm, StackFrame::INTERNAL);
   1653 
   1654   // Number-of-arguments register must be smi-tagged to call out.
   1655   __ SmiTag(r2);
   1656   __ Push(r5, r4, r3, r2);
   1657   __ Push(cp);
   1658 
   1659   __ CallStub(stub);
   1660 
   1661   __ Pop(cp);
   1662   __ Pop(r5, r4, r3, r2);
   1663   __ SmiUntag(r2);
   1664 }
   1665 
   1666 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   1667   // Cache the called function in a feedback vector slot.  Cache states
   1668   // are uninitialized, monomorphic (indicated by a JSFunction), and
   1669   // megamorphic.
   1670   // r2 : number of arguments to the construct function
   1671   // r3 : the function to call
   1672   // r4 : feedback vector
   1673   // r5 : slot in feedback vector (Smi)
   1674   Label initialize, done, miss, megamorphic, not_array_function;
   1675 
   1676   DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
   1677             masm->isolate()->heap()->megamorphic_symbol());
   1678   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
   1679             masm->isolate()->heap()->uninitialized_symbol());
   1680 
   1681   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
   1682 
   1683   // Load the cache state into r7.
   1684   __ SmiToPtrArrayOffset(r7, r5);
   1685   __ AddP(r7, r4, r7);
   1686   __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
   1687 
   1688   // A monomorphic cache hit or an already megamorphic state: invoke the
   1689   // function without changing the state.
   1690   // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
   1691   // this position in a symbol (see static asserts in feedback-vector.h).
   1692   Label check_allocation_site;
   1693   Register feedback_map = r8;
   1694   Register weak_value = r9;
   1695   __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
   1696   __ CmpP(r3, weak_value);
   1697   __ beq(&done, Label::kNear);
   1698   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
   1699   __ beq(&done, Label::kNear);
   1700   __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
   1701   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
   1702   __ bne(&check_allocation_site);
   1703 
   1704   // If the weak cell is cleared, we have a new chance to become monomorphic.
   1705   __ JumpIfSmi(weak_value, &initialize);
   1706   __ b(&megamorphic);
   1707 
   1708   __ bind(&check_allocation_site);
   1709   // If we came here, we need to see if we are the array function.
   1710   // If we didn't have a matching function, and we didn't find the megamorphic
   1711   // sentinel, then the slot holds either some other function or an
   1712   // AllocationSite.
   1713   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
   1714   __ bne(&miss);
   1715 
   1716   // Make sure the function is the Array() function
   1717   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
   1718   __ CmpP(r3, r7);
   1719   __ bne(&megamorphic);
   1720   __ b(&done, Label::kNear);
   1721 
   1722   __ bind(&miss);
   1723 
   1724   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
   1725   // megamorphic.
   1726   __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
   1727   __ beq(&initialize);
   1728   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   1729   // write-barrier is needed.
   1730   __ bind(&megamorphic);
   1731   __ SmiToPtrArrayOffset(r7, r5);
   1732   __ AddP(r7, r4, r7);
   1733   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
   1734   __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
   1735   __ jmp(&done);
   1736 
   1737   // An uninitialized cache is patched with the function.
   1738   __ bind(&initialize);
   1739 
   1740   // Make sure the function is the Array() function.
   1741   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
   1742   __ CmpP(r3, r7);
   1743   __ bne(&not_array_function);
   1744 
   1745   // The target function is the Array constructor.
   1746   // Create an AllocationSite if we don't already have it, and store it in
   1747   // the slot.
   1748   CreateAllocationSiteStub create_stub(masm->isolate());
   1749   CallStubInRecordCallTarget(masm, &create_stub);
   1750   __ b(&done, Label::kNear);
   1751 
   1752   __ bind(&not_array_function);
   1753 
   1754   CreateWeakCellStub weak_cell_stub(masm->isolate());
   1755   CallStubInRecordCallTarget(masm, &weak_cell_stub);
   1756 
   1757   __ bind(&done);
   1758 
   1759   // Increment the call count for all function calls.
   1760   __ SmiToPtrArrayOffset(r7, r5);
   1761   __ AddP(r7, r4, r7);
   1762 
   1763   __ LoadP(r6, FieldMemOperand(r7, count_offset));
   1764   __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
   1765   __ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
   1766 }
   1767 
   1768 void CallConstructStub::Generate(MacroAssembler* masm) {
   1769   // r2 : number of arguments
   1770   // r3 : the function to call
   1771   // r4 : feedback vector
   1772   // r5 : slot in feedback vector (Smi, for RecordCallTarget)
   1773 
   1774   Label non_function;
   1775   // Check that the function is not a smi.
   1776   __ JumpIfSmi(r3, &non_function);
   1777   // Check that the function is a JSFunction.
   1778   __ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
   1779   __ bne(&non_function);
   1780 
   1781   GenerateRecordCallTarget(masm);
   1782 
   1783   __ SmiToPtrArrayOffset(r7, r5);
   1784   __ AddP(r7, r4, r7);
   1785   // Put the AllocationSite from the feedback vector into r4, or undefined.
   1786   __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
   1787   __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
   1788   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
   1789   Label feedback_register_initialized;
   1790   __ beq(&feedback_register_initialized);
   1791   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
   1792   __ bind(&feedback_register_initialized);
   1793 
   1794   __ AssertUndefinedOrAllocationSite(r4, r7);
   1795 
   1796   // Pass function as new target.
   1797   __ LoadRR(r5, r3);
   1798 
   1799   // Tail call to the function-specific construct stub (still in the caller
   1800   // context at this point).
   1801   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   1802   __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
   1803   __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
   1804   __ JumpToJSEntry(ip);
   1805 
   1806   __ bind(&non_function);
   1807   __ LoadRR(r5, r3);
   1808   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   1809 }
   1810 
   1811 // StringCharCodeAtGenerator
   1812 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   1813   // If the receiver is a smi, trigger the non-string case.
   1814   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
   1815     __ JumpIfSmi(object_, receiver_not_string_);
   1816 
   1817     // Fetch the instance type of the receiver into result register.
   1818     __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   1819     __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   1820     // If the receiver is not a string, trigger the non-string case.
   1821     __ mov(r0, Operand(kIsNotStringMask));
   1822     __ AndP(r0, result_);
   1823     __ bne(receiver_not_string_);
   1824   }
   1825 
   1826   // If the index is not a smi, trigger the non-smi case.
   1827   __ JumpIfNotSmi(index_, &index_not_smi_);
   1828   __ bind(&got_smi_index_);
   1829 
   1830   // Check for index out of range.
   1831   __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
   1832   __ CmpLogicalP(ip, index_);
   1833   __ ble(index_out_of_range_);
   1834 
   1835   __ SmiUntag(index_);
   1836 
   1837   StringCharLoadGenerator::Generate(masm, object_, index_, result_,
   1838                                     &call_runtime_);
   1839 
   1840   __ SmiTag(result_);
   1841   __ bind(&exit_);
   1842 }
   1843 
   1844 void StringCharCodeAtGenerator::GenerateSlow(
   1845     MacroAssembler* masm, EmbedMode embed_mode,
   1846     const RuntimeCallHelper& call_helper) {
   1847   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
   1848 
   1849   // Index is not a smi.
   1850   __ bind(&index_not_smi_);
   1851   // If index is a heap number, try converting it to an integer.
   1852   __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
   1853               DONT_DO_SMI_CHECK);
   1854   call_helper.BeforeCall(masm);
   1855   if (embed_mode == PART_OF_IC_HANDLER) {
   1856     __ Push(LoadWithVectorDescriptor::VectorRegister(),
   1857             LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   1858   } else {
   1859     // index_ is consumed by runtime conversion function.
   1860     __ Push(object_, index_);
   1861   }
   1862   __ CallRuntime(Runtime::kNumberToSmi);
   1863   // Save the conversion result before the pop instructions below
   1864   // have a chance to overwrite it.
   1865   __ Move(index_, r2);
   1866   if (embed_mode == PART_OF_IC_HANDLER) {
   1867     __ Pop(LoadWithVectorDescriptor::VectorRegister(),
   1868            LoadWithVectorDescriptor::SlotRegister(), object_);
   1869   } else {
   1870     __ pop(object_);
   1871   }
   1872   // Reload the instance type.
   1873   __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   1874   __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   1875   call_helper.AfterCall(masm);
   1876   // If index is still not a smi, it must be out of range.
   1877   __ JumpIfNotSmi(index_, index_out_of_range_);
   1878   // Otherwise, return to the fast path.
   1879   __ b(&got_smi_index_);
   1880 
   1881   // Call runtime. We get here when the receiver is a string and the
   1882   // index is a number, but the code of getting the actual character
   1883   // is too complex (e.g., when the string needs to be flattened).
   1884   __ bind(&call_runtime_);
   1885   call_helper.BeforeCall(masm);
   1886   __ SmiTag(index_);
   1887   __ Push(object_, index_);
   1888   __ CallRuntime(Runtime::kStringCharCodeAtRT);
   1889   __ Move(result_, r2);
   1890   call_helper.AfterCall(masm);
   1891   __ b(&exit_);
   1892 
   1893   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
   1894 }
   1895 
   1896 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
   1897                                                    Register left,
   1898                                                    Register right,
   1899                                                    Register scratch1,
   1900                                                    Register scratch2) {
   1901   Register length = scratch1;
   1902 
   1903   // Compare lengths.
   1904   Label strings_not_equal, check_zero_length;
   1905   __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
   1906   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
   1907   __ CmpP(length, scratch2);
   1908   __ beq(&check_zero_length);
   1909   __ bind(&strings_not_equal);
   1910   __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL));
   1911   __ Ret();
   1912 
   1913   // Check if the length is zero.
   1914   Label compare_chars;
   1915   __ bind(&check_zero_length);
   1916   STATIC_ASSERT(kSmiTag == 0);
   1917   __ CmpP(length, Operand::Zero());
   1918   __ bne(&compare_chars);
   1919   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
   1920   __ Ret();
   1921 
   1922   // Compare characters.
   1923   __ bind(&compare_chars);
   1924   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
   1925                                   &strings_not_equal);
   1926 
   1927   // Characters are equal.
   1928   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
   1929   __ Ret();
   1930 }
   1931 
   1932 void StringHelper::GenerateCompareFlatOneByteStrings(
   1933     MacroAssembler* masm, Register left, Register right, Register scratch1,
   1934     Register scratch2, Register scratch3) {
   1935   Label skip, result_not_equal, compare_lengths;
   1936   // Find minimum length and length difference.
   1937   __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
   1938   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
   1939   __ SubP(scratch3, scratch1, scratch2);  // length_delta = left - right.
   1941   Register length_delta = scratch3;
   1942   __ ble(&skip, Label::kNear);
   1943   __ LoadRR(scratch1, scratch2);
   1944   __ bind(&skip);
   1945   Register min_length = scratch1;
   1946   STATIC_ASSERT(kSmiTag == 0);
   1947   __ CmpP(min_length, Operand::Zero());
   1948   __ beq(&compare_lengths);
   1949 
   1950   // Compare loop.
   1951   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
   1952                                   &result_not_equal);
   1953 
   1954   // Compare lengths - strings up to min-length are equal.
   1955   __ bind(&compare_lengths);
   1956   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   1957   // Use length_delta as result if it's zero.
   1958   __ LoadRR(r2, length_delta);
   1959   __ CmpP(length_delta, Operand::Zero());
   1960   __ bind(&result_not_equal);
   1961   // Conditionally update the result based either on length_delta or
   1962   // the last comparison performed in the loop above.
   1963   Label less_equal, equal;
   1964   __ ble(&less_equal);
   1965   __ LoadSmiLiteral(r2, Smi::FromInt(GREATER));
   1966   __ Ret();
   1967   __ bind(&less_equal);
   1968   __ beq(&equal);
   1969   __ LoadSmiLiteral(r2, Smi::FromInt(LESS));
   1970   __ bind(&equal);
   1971   __ Ret();
   1972 }
   1973 
   1974 void StringHelper::GenerateOneByteCharsCompareLoop(
   1975     MacroAssembler* masm, Register left, Register right, Register length,
   1976     Register scratch1, Label* chars_not_equal) {
   1977   // Change index to run from -length to -1 by adding length to string
   1978   // start. This means that loop ends when index reaches zero, which
   1979   // doesn't need an additional compare.
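          // Equivalent C sketch (left and right point one past their last
          // character at this point):
          //   for (intptr_t index = -length; index != 0; index++)
          //     if (left[index] != right[index]) goto chars_not_equal;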
   1980   __ SmiUntag(length);
   1981   __ AddP(scratch1, length,
   1982           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   1983   __ AddP(left, scratch1);
   1984   __ AddP(right, scratch1);
   1985   __ LoadComplementRR(length, length);
   1986   Register index = length;  // index = -length;
   1987 
   1988   // Compare loop.
   1989   Label loop;
   1990   __ bind(&loop);
   1991   __ LoadlB(scratch1, MemOperand(left, index));
   1992   __ LoadlB(r0, MemOperand(right, index));
   1993   __ CmpP(scratch1, r0);
   1994   __ bne(chars_not_equal);
   1995   __ AddP(index, Operand(1));
   1996   __ CmpP(index, Operand::Zero());
   1997   __ bne(&loop);
   1998 }
   1999 
   2000 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   2001   // ----------- S t a t e -------------
   2002   //  -- r3    : left
   2003   //  -- r2    : right
   2005   // -----------------------------------
   2006 
   2007   // Load r4 with the allocation site.  We stick an undefined dummy value here
   2008   // and replace it with the real allocation site later when we instantiate this
   2009   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
   2010   __ Move(r4, isolate()->factory()->undefined_value());
   2011 
   2012   // Make sure that we actually patched the allocation site.
   2013   if (FLAG_debug_code) {
   2014     __ TestIfSmi(r4);
   2015     __ Assert(ne, kExpectedAllocationSite, cr0);
   2016     __ push(r4);
   2017     __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset));
   2018     __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
   2019     __ pop(r4);
   2020     __ Assert(eq, kExpectedAllocationSite);
   2021   }
   2022 
   2023   // Tail call into the stub that handles binary operations with allocation
   2024   // sites.
   2025   BinaryOpWithAllocationSiteStub stub(isolate(), state());
   2026   __ TailCallStub(&stub);
   2027 }
   2028 
   2029 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   2030   DCHECK_EQ(CompareICState::BOOLEAN, state());
   2031   Label miss;
   2032 
   2033   __ CheckMap(r3, r4, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2034   __ CheckMap(r2, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   2035   if (!Token::IsEqualityOp(op())) {
   2036     __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
   2037     __ AssertSmi(r3);
   2038     __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
   2039     __ AssertSmi(r2);
   2040   }
   2041   __ SubP(r2, r3, r2);
   2042   __ Ret();
   2043 
   2044   __ bind(&miss);
   2045   GenerateMiss(masm);
   2046 }
   2047 
   2048 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   2049   DCHECK(state() == CompareICState::SMI);
   2050   Label miss;
   2051   __ OrP(r4, r3, r2);
   2052   __ JumpIfNotSmi(r4, &miss);
   2053 
   2054   if (GetCondition() == eq) {
   2055     // For equality we do not care about the sign of the result.
   2057     __ SubP(r2, r2, r3);
   2058   } else {
   2059     // Untag before subtracting to avoid handling overflow.
   2060     __ SmiUntag(r3);
   2061     __ SmiUntag(r2);
   2062     __ SubP(r2, r3, r2);
   2063   }
   2064   __ Ret();
   2065 
   2066   __ bind(&miss);
   2067   GenerateMiss(masm);
   2068 }
   2069 
   2070 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   2071   DCHECK(state() == CompareICState::NUMBER);
   2072 
   2073   Label generic_stub;
   2074   Label unordered, maybe_undefined1, maybe_undefined2;
   2075   Label miss;
   2076   Label equal, less_than;
   2077 
   2078   if (left() == CompareICState::SMI) {
   2079     __ JumpIfNotSmi(r3, &miss);
   2080   }
   2081   if (right() == CompareICState::SMI) {
   2082     __ JumpIfNotSmi(r2, &miss);
   2083   }
   2084 
   2085   // Inlining the double comparison and falling back to the general compare
   2086   // stub if NaN is involved.
   2087   // Load left and right operand.
   2088   Label done, left, left_smi, right_smi;
   2089   __ JumpIfSmi(r2, &right_smi);
   2090   __ CheckMap(r2, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
   2091               DONT_DO_SMI_CHECK);
   2092   __ LoadDouble(d1, FieldMemOperand(r2, HeapNumber::kValueOffset));
   2093   __ b(&left);
   2094   __ bind(&right_smi);
   2095   __ SmiToDouble(d1, r2);
   2096 
   2097   __ bind(&left);
   2098   __ JumpIfSmi(r3, &left_smi);
   2099   __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
   2100               DONT_DO_SMI_CHECK);
   2101   __ LoadDouble(d0, FieldMemOperand(r3, HeapNumber::kValueOffset));
   2102   __ b(&done);
   2103   __ bind(&left_smi);
   2104   __ SmiToDouble(d0, r3);
   2105 
   2106   __ bind(&done);
   2107 
   2108   // Compare operands
   2109   __ cdbr(d0, d1);
   2110 
   2111   // Don't base result on status bits when a NaN is involved.
   2112   __ bunordered(&unordered);
   2113 
   2114   // Return a result of -1, 0, or 1, based on status bits.
   2115   __ beq(&equal);
   2116   __ blt(&less_than);
   2117   // Assume greater than.
   2118   __ LoadImmP(r2, Operand(GREATER));
   2119   __ Ret();
   2120   __ bind(&equal);
   2121   __ LoadImmP(r2, Operand(EQUAL));
   2122   __ Ret();
   2123   __ bind(&less_than);
   2124   __ LoadImmP(r2, Operand(LESS));
   2125   __ Ret();
   2126 
   2127   __ bind(&unordered);
   2128   __ bind(&generic_stub);
   2129   CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
   2130                      CompareICState::GENERIC, CompareICState::GENERIC);
   2131   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
   2132 
   2133   __ bind(&maybe_undefined1);
   2134   if (Token::IsOrderedRelationalCompareOp(op())) {
   2135     __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
   2136     __ bne(&miss);
   2137     __ JumpIfSmi(r3, &unordered);
   2138     __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
   2139     __ bne(&maybe_undefined2);
   2140     __ b(&unordered);
   2141   }
   2142 
   2143   __ bind(&maybe_undefined2);
   2144   if (Token::IsOrderedRelationalCompareOp(op())) {
   2145     __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
   2146     __ beq(&unordered);
   2147   }
   2148 
   2149   __ bind(&miss);
   2150   GenerateMiss(masm);
   2151 }
   2152 
   2153 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
   2154   DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   2155   Label miss, not_equal;
   2156 
   2157   // Registers containing left and right operands respectively.
   2158   Register left = r3;
   2159   Register right = r2;
   2160   Register tmp1 = r4;
   2161   Register tmp2 = r5;
   2162 
   2163   // Check that both operands are heap objects.
   2164   __ JumpIfEitherSmi(left, right, &miss);
   2165 
   2166   // Check that both operands are symbols.
   2167   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2168   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2169   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2170   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2171   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   2172   __ OrP(tmp1, tmp1, tmp2);
   2173   __ AndP(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
   2174   __ bne(&miss);
   2175 
   2176   // Internalized strings are compared by identity.
   2177   __ CmpP(left, right);
   2178   __ bne(&not_equal);
   2179   // Make sure r2 is non-zero. At this point input operands are
   2180   // guaranteed to be non-zero.
   2181   DCHECK(right.is(r2));
   2182   STATIC_ASSERT(EQUAL == 0);
   2183   STATIC_ASSERT(kSmiTag == 0);
   2184   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
   2185   __ bind(&not_equal);
   2186   __ Ret();
   2187 
   2188   __ bind(&miss);
   2189   GenerateMiss(masm);
   2190 }
   2191 
   2192 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
   2193   DCHECK(state() == CompareICState::UNIQUE_NAME);
   2194   DCHECK(GetCondition() == eq);
   2195   Label miss;
   2196 
   2197   // Registers containing left and right operands respectively.
   2198   Register left = r3;
   2199   Register right = r2;
   2200   Register tmp1 = r4;
   2201   Register tmp2 = r5;
   2202 
   2203   // Check that both operands are heap objects.
   2204   __ JumpIfEitherSmi(left, right, &miss);
   2205 
   2206   // Check that both operands are unique names. This leaves the instance
   2207   // types loaded in tmp1 and tmp2.
   2208   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2209   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2210   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2211   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2212 
   2213   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
   2214   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
   2215 
   2216   // Unique names are compared by identity.
   2217   __ CmpP(left, right);
   2218   __ bne(&miss);
   2219   // Make sure r2 is non-zero. At this point input operands are
   2220   // guaranteed to be non-zero.
   2221   DCHECK(right.is(r2));
   2222   STATIC_ASSERT(EQUAL == 0);
   2223   STATIC_ASSERT(kSmiTag == 0);
   2224   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
   2225   __ Ret();
   2226 
   2227   __ bind(&miss);
   2228   GenerateMiss(masm);
   2229 }
   2230 
   2231 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
   2232   DCHECK(state() == CompareICState::STRING);
   2233   Label miss, not_identical, is_symbol;
   2234 
   2235   bool equality = Token::IsEqualityOp(op());
   2236 
   2237   // Registers containing left and right operands respectively.
   2238   Register left = r3;
   2239   Register right = r2;
   2240   Register tmp1 = r4;
   2241   Register tmp2 = r5;
   2242   Register tmp3 = r6;
   2243   Register tmp4 = r7;
   2244 
   2245   // Check that both operands are heap objects.
   2246   __ JumpIfEitherSmi(left, right, &miss);
   2247 
   2248   // Check that both operands are strings. This leaves the instance
   2249   // types loaded in tmp1 and tmp2.
   2250   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
   2251   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
   2252   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   2253   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
   2254   STATIC_ASSERT(kNotStringTag != 0);
   2255   __ OrP(tmp3, tmp1, tmp2);
   2256   __ AndP(r0, tmp3, Operand(kIsNotStringMask));
   2257   __ bne(&miss);
   2258 
   2259   // Fast check for identical strings.
   2260   __ CmpP(left, right);
   2261   STATIC_ASSERT(EQUAL == 0);
   2262   STATIC_ASSERT(kSmiTag == 0);
   2263   __ bne(&not_identical);
   2264   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
   2265   __ Ret();
   2266   __ bind(&not_identical);
   2267 
   2268   // Handle not identical strings.
   2269 
   2270   // Check that both strings are internalized strings. If they are, we're done
   2271   // because we already know they are not identical. We know they are both
   2272   // strings.
   2273   if (equality) {
   2274     DCHECK(GetCondition() == eq);
   2275     STATIC_ASSERT(kInternalizedTag == 0);
   2276     __ OrP(tmp3, tmp1, tmp2);
   2277     __ AndP(r0, tmp3, Operand(kIsNotInternalizedMask));
   2278     __ bne(&is_symbol);
   2279     // Make sure r2 is non-zero. At this point input operands are
   2280     // guaranteed to be non-zero.
   2281     DCHECK(right.is(r2));
   2282     __ Ret();
   2283     __ bind(&is_symbol);
   2284   }
   2285 
   2286   // Check that both strings are sequential one-byte.
   2287   Label runtime;
   2288   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
   2289                                                     &runtime);
   2290 
   2291   // Compare flat one-byte strings. Returns when done.
   2292   if (equality) {
   2293     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
   2294                                                   tmp2);
   2295   } else {
   2296     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
   2297                                                     tmp2, tmp3);
   2298   }
   2299 
   2300   // Handle more complex cases in runtime.
   2301   __ bind(&runtime);
   2302   if (equality) {
   2303     {
   2304       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   2305       __ Push(left, right);
   2306       __ CallRuntime(Runtime::kStringEqual);
   2307     }
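            // Runtime::kStringEqual returns the true or false heap object;
            // subtracting the true root leaves zero in r2 exactly when the
            // strings were equal.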
   2308     __ LoadRoot(r3, Heap::kTrueValueRootIndex);
   2309     __ SubP(r2, r2, r3);
   2310     __ Ret();
   2311   } else {
   2312     __ Push(left, right);
   2313     __ TailCallRuntime(Runtime::kStringCompare);
   2314   }
   2315 
   2316   __ bind(&miss);
   2317   GenerateMiss(masm);
   2318 }
   2319 
   2320 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
   2321   DCHECK_EQ(CompareICState::RECEIVER, state());
   2322   Label miss;
   2323   __ AndP(r4, r3, r2);
   2324   __ JumpIfSmi(r4, &miss);
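          // A smi has a clear tag bit, so the AND above has a clear tag bit (and
          // JumpIfSmi bails out) whenever at least one operand is a smi.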
   2325 
   2326   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
   2327   __ CompareObjectType(r2, r4, r4, FIRST_JS_RECEIVER_TYPE);
   2328   __ blt(&miss);
   2329   __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
   2330   __ blt(&miss);
   2331 
   2332   DCHECK(GetCondition() == eq);
   2333   __ SubP(r2, r2, r3);
   2334   __ Ret();
   2335 
   2336   __ bind(&miss);
   2337   GenerateMiss(masm);
   2338 }
   2339 
   2340 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
   2341   Label miss;
   2342   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
   2343   __ AndP(r4, r3, r2);
   2344   __ JumpIfSmi(r4, &miss);
   2345   __ GetWeakValue(r6, cell);
   2346   __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
   2347   __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
   2348   __ CmpP(r4, r6);
   2349   __ bne(&miss);
   2350   __ CmpP(r5, r6);
   2351   __ bne(&miss);
   2352 
   2353   if (Token::IsEqualityOp(op())) {
   2354     __ SubP(r2, r2, r3);
   2355     __ Ret();
   2356   } else {
   2357     if (op() == Token::LT || op() == Token::LTE) {
   2358       __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
   2359     } else {
   2360       __ LoadSmiLiteral(r4, Smi::FromInt(LESS));
   2361     }
   2362     __ Push(r3, r2, r4);
   2363     __ TailCallRuntime(Runtime::kCompare);
   2364   }
   2365 
   2366   __ bind(&miss);
   2367   GenerateMiss(masm);
   2368 }
   2369 
   2370 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   2371   {
   2372     // Call the runtime system in a fresh internal frame.
   2373     FrameScope scope(masm, StackFrame::INTERNAL);
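            // Push the operands twice: the first pair is popped below to restore
            // r3/r2 after the call, while the second pair and the op smi are
            // consumed as the runtime call's arguments.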
   2374     __ Push(r3, r2);
   2375     __ Push(r3, r2);
   2376     __ LoadSmiLiteral(r0, Smi::FromInt(op()));
   2377     __ push(r0);
   2378     __ CallRuntime(Runtime::kCompareIC_Miss);
   2379     // Compute the entry point of the rewritten stub.
   2380     __ AddP(r4, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
   2381     // Restore registers.
   2382     __ Pop(r3, r2);
   2383   }
   2384 
   2385   __ JumpToJSEntry(r4);
   2386 }
   2387 
   2388 // This stub is paired with DirectCEntryStub::GenerateCall
   2389 void DirectCEntryStub::Generate(MacroAssembler* masm) {
   2390   __ CleanseP(r14);
   2391 
   2392   __ b(ip);  // Callee will return to R14 directly
   2393 }
   2394 
   2395 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
   2396 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
   2397   // Native AIX/S390X Linux uses a function descriptor.
   2398   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
   2399   __ LoadP(target, MemOperand(target, 0));  // Instruction address
   2400 #else
   2401   // ip needs to be set for DirectCEntryStub::Generate, and also
   2402   // for ABI_CALL_VIA_IP.
   2403   __ Move(ip, target);
   2404 #endif
   2405 
   2406   __ call(GetCode(), RelocInfo::CODE_TARGET);  // Call the stub.
   2407 }
   2408 
   2409 void NameDictionaryLookupStub::GenerateNegativeLookup(
   2410     MacroAssembler* masm, Label* miss, Label* done, Register receiver,
   2411     Register properties, Handle<Name> name, Register scratch0) {
   2412   DCHECK(name->IsUniqueName());
   2413   // If none of the slots probed for the hash value (probes 1 to kProbes - 1)
   2414   // holds a name equal to the given one, and the kProbes-th slot is unused
   2415   // (its name is the undefined value), the hash table is guaranteed not to
   2416   // contain the property. This holds even if some slots represent deleted
   2417   // properties (their names are the hole value).
   2418   for (int i = 0; i < kInlinedProbes; i++) {
   2419     // scratch0 points to properties hash.
   2420     // Compute the masked index: (hash + i + i * i) & mask.
   2421     Register index = scratch0;
   2422     // Capacity is smi 2^n.
   2423     __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
   2424     __ SubP(index, Operand(1));
   2425     __ LoadSmiLiteral(
   2426         ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
   2427     __ AndP(index, ip);
   2428 
   2429     // Scale the index by multiplying by the entry size.
   2430     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   2431     __ ShiftLeftP(ip, index, Operand(1));
   2432     __ AddP(index, ip);  // index *= 3.
   2433 
   2434     Register entity_name = scratch0;
   2435     // Having undefined at this place means the name is not contained.
   2436     Register tmp = properties;
   2437     __ SmiToPtrArrayOffset(ip, index);
   2438     __ AddP(tmp, properties, ip);
   2439     __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
   2440 
   2441     DCHECK(!tmp.is(entity_name));
   2442     __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
   2443     __ beq(done);
   2444 
   2445     // Stop if found the property.
   2446     __ CmpP(entity_name, Operand(Handle<Name>(name)));
   2447     __ beq(miss);
   2448 
   2449     Label good;
   2450     __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
   2451     __ beq(&good);
   2452 
   2453     // Check if the entry name is not a unique name.
   2454     __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
   2455     __ LoadlB(entity_name,
   2456               FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
   2457     __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
   2458     __ bind(&good);
   2459 
   2460     // Restore the properties.
   2461     __ LoadP(properties,
   2462              FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   2463   }
   2464 
   2465   const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
   2466                           r4.bit() | r3.bit() | r2.bit());
   2467 
   2468   __ LoadRR(r0, r14);
   2469   __ MultiPush(spill_mask);
   2470 
   2471   __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   2472   __ mov(r3, Operand(Handle<Name>(name)));
   2473   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
   2474   __ CallStub(&stub);
   2475   __ CmpP(r2, Operand::Zero());
   2476 
   2477   __ MultiPop(spill_mask);  // MultiPop does not touch condition flags
   2478   __ LoadRR(r14, r0);
   2479 
   2480   __ beq(done);
   2481   __ bne(miss);
   2482 }
   2483 
   2484 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   2485   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   2486   // we cannot call anything that could cause a GC from this stub.
   2487   // Registers:
   2488   //  dictionary (r2): NameDictionary to probe.
   2489   //  key (r3): the name being looked up.
   2490   //  index (r4): will hold the index of the entry if the lookup is
   2491   //              successful; may alias with result.
   2492   // Returns:
   2493   //  result (r2, aliasing dictionary): zero if the lookup failed,
   2494   //  non-zero otherwise.
   2495 
   2496   Register result = r2;
   2497   Register dictionary = r2;
   2498   Register key = r3;
   2499   Register index = r4;
   2500   Register mask = r5;
   2501   Register hash = r6;
   2502   Register undefined = r7;
   2503   Register entry_key = r8;
   2504   Register scratch = r8;
   2505 
   2506   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
   2507 
   2508   __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
   2509   __ SmiUntag(mask);
   2510   __ SubP(mask, Operand(1));
   2511 
   2512   __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset));
   2513 
   2514   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
   2515 
   2516   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
   2517     // Compute the masked index: (hash + i + i * i) & mask.
   2518     // Capacity is smi 2^n.
   2519     if (i > 0) {
   2520       // Add the probe offset (i + i * i), pre-shifted left, to avoid
   2521       // right-shifting the hash in a separate instruction. The value
   2522       // hash + i + i * i is then right-shifted and masked below.
   2523       DCHECK(NameDictionary::GetProbeOffset(i) <
   2524              1 << (32 - Name::kHashFieldOffset));
   2525       __ AddP(index, hash,
   2526               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
   2527     } else {
   2528       __ LoadRR(index, hash);
   2529     }
   2530     __ ShiftRight(r0, index, Operand(String::kHashShift));
   2531     __ AndP(index, r0, mask);
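            // Net effect: index = ((hash >> Name::kHashShift) +
            // NameDictionary::GetProbeOffset(i)) & mask, i.e. quadratic probing
            // on the untagged hash.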
   2532 
   2533     // Scale the index by multiplying by the entry size.
   2534     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
   2535     __ ShiftLeftP(scratch, index, Operand(1));
   2536     __ AddP(index, scratch);  // index *= 3.
   2537 
   2538     __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2));
   2539     __ AddP(index, dictionary, scratch);
   2540     __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
   2541 
   2542     // Having undefined at this place means the name is not contained.
   2543     __ CmpP(entry_key, undefined);
   2544     __ beq(&not_in_dictionary);
   2545 
   2546     // Stop if found the property.
   2547     __ CmpP(entry_key, key);
   2548     __ beq(&in_dictionary);
   2549 
   2550     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
   2551       // Check if the entry name is not a unique name.
   2552       __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
   2553       __ LoadlB(entry_key,
   2554                 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
   2555       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
   2556     }
   2557   }
   2558 
   2559   __ bind(&maybe_in_dictionary);
   2560   // If we are doing negative lookup then probing failure should be
   2561   // treated as a lookup success. For positive lookup probing failure
   2562   // should be treated as lookup failure.
   2563   if (mode() == POSITIVE_LOOKUP) {
   2564     __ LoadImmP(result, Operand::Zero());
   2565     __ Ret();
   2566   }
   2567 
   2568   __ bind(&in_dictionary);
   2569   __ LoadImmP(result, Operand(1));
   2570   __ Ret();
   2571 
   2572   __ bind(&not_in_dictionary);
   2573   __ LoadImmP(result, Operand::Zero());
   2574   __ Ret();
   2575 }

void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}

// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object; now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call. We patch
  // them back and forth between branch conditions True and False
  // when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.

  // Both branches are emitted with a NOP condition initially, so the stub
  // falls through to the store-buffer-only path below.
  __ b(CC_NOP, &skip_to_incremental_noncompacting);
  __ b(CC_NOP, &skip_to_incremental_compacting);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  // Patching is not required on S390, as the initial path is effectively a
  // NOP.
}

void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(), &dont_need_remembered_set);

    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}

void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      r2.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(r2));
  __ LoadRR(address, regs_.address());
  __ LoadRR(r2, regs_.object());
  __ LoadRR(r3, address);
  __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
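  // The three C arguments are now in place: r2 = object, r3 = slot address,
  // r4 = isolate, matching the record-write function called below.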

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}

void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  // Let's look at the color of the object: if it is not black, we don't
  // have to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask, eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}

void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
  __ LoadP(r3, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ AddP(r3, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
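  // Pop the arguments: la computes r3 + sp into sp, where r3 now holds the
  // argument area size in bytes.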
  __ la(sp, MemOperand(r3, sp));
  __ Ret();
}

void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_S390X
                                         40);
#elif V8_HOST_ARCH_S390
                                         36);
#else
                                         32);
#endif
    ProfileEntryHookStub stub(masm->isolate());
    __ CleanseP(r14);
    __ Push(r14, ip);
    __ CallStub(&stub);  // BRASL
    __ Pop(r14, ip);
  }
}

void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call.
#if V8_TARGET_ARCH_S390X
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 18;  // LAY + STG * 2
#elif V8_HOST_ARCH_S390
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 18;  // NILH + LAY + ST * 2
#else
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + 14;  // LAY + ST * 2
#endif

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
                             r7.bit();         // Saved stack pointer.

  // We also save r14 and ip, so the count here is higher than the mask alone
  // indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 3;

  // Save all caller-save registers as this may be called from anywhere.
  __ CleanseP(r14);
  __ LoadRR(ip, r14);
  __ MultiPush(kSavedRegs | ip.bit());

  // Compute the function's address for the first argument.

  __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is two slots above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ LoadRR(r7, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }

#if !defined(USE_SIMULATOR)
  uintptr_t entry_hook =
      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
  __ mov(ip, Operand(entry_hook));

#if ABI_USES_FUNCTION_DESCRIPTORS
  // Function descriptor
  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
  __ LoadP(ip, MemOperand(ip, 0));
// ip already set.
#endif
#endif

  // zLinux ABI requires caller's frame to have sufficient space for callee
  // preserved register save area.
  __ LoadImmP(r0, Operand::Zero());
  __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize -
                                kNumRequiredStackFrameSlots * kPointerSize));
  __ StoreP(r0, MemOperand(sp));
#if defined(USE_SIMULATOR)
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ mov(ip, Operand(ExternalReference(
                 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
#endif
  __ Call(ip);

  // zLinux ABI requires caller's frame to have sufficient space for callee
  // preserved register save area.
  __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize +
                               kNumRequiredStackFrameSlots * kPointerSize));

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ LoadRR(sp, r7);
  }

  // Also pop lr to get Ret(0).
  __ MultiPop(kSavedRegs | ip.bit());
  __ LoadRR(r14, ip);
  __ Ret();
}
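
// For context: the hook called above is assumed to match the
// FunctionEntryHook signature from include/v8.h,
//
//   typedef void (*FunctionEntryHook)(uintptr_t function,
//                                     uintptr_t return_addr_location);
//
// with the function's start address passed in r2 and the stack slot holding
// the caller's return address passed in r3.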

template <class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ CmpP(r5, Operand(kind));
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}

static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // r5 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r2 - number of arguments
  // r3 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // Is the low bit set? If so, we are holey and that is good.
    __ AndP(r0, r5, Operand(1));
    __ bne(&normal_sequence);
  }

  // Look at the first argument.
  __ LoadP(r7, MemOperand(sp, 0));
  __ CmpP(r7, Operand::Zero());
  __ beq(&normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(
        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ AddP(r5, r5, Operand(1));
    if (FLAG_debug_code) {
      __ LoadP(r7, FieldMemOperand(r4, 0));
      __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r5
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field; upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
    __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
    __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
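    // Per the STATIC_ASSERTs above, packed kinds are even and the matching
    // holey kind is one greater, so this smi addition flips packed to holey
    // in the low bits while leaving the upper bits of the field untouched.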

    __ bind(&normal_sequence);
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ CmpP(r5, Operand(kind));
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}

template <class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index =
      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}

void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}

void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ CmpP(r2, Operand::Zero());
  __ bne(&not_zero_case);
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ CmpP(r2, Operand(1));
  __ bgt(&not_one_case);
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}

void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : argc (only if argument_count() == ANY)
  //  -- r3 : constructor
  //  -- r4 : AllocationSite or undefined
  //  -- r5 : new target
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
    // A NULL pointer and a Smi will both be caught by this test.
    __ TestIfSmi(r6);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
    __ CompareObjectType(r6, r6, r7, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should have either undefined in r4 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(r4, r6);
  }

  // Enter the context of the Array function.
  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));

  Label subclassing;
  __ CmpP(r5, r3);
  __ bne(&subclassing, Label::kNear);

  Label no_info;
  // Get the elements kind and dispatch on that.
  __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
  __ beq(&no_info);

  __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r5);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  __ bind(&subclassing);
  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
  __ StoreP(r3, MemOperand(sp, r1));
  __ AddP(r2, r2, Operand(3));
  __ Push(r5, r4);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}

void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
                                                ElementsKind kind) {
  __ CmpLogicalP(r2, Operand(1));

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lt);

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, gt);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument.
    __ LoadP(r5, MemOperand(sp, 0));
    __ CmpP(r5, Operand::Zero());

    InternalArraySingleArgumentConstructorStub stub1_holey(
        isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne);
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}

void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : argc
  //  -- r3 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
    // A NULL pointer and a Smi will both be caught by this test.
    __ TestIfSmi(r5);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
    __ CompareObjectType(r5, r5, r6, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into r5.
  __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(r5);

  if (FLAG_debug_code) {
    Label done;
    __ CmpP(r5, Operand(FAST_ELEMENTS));
    __ beq(&done);
    __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ CmpP(r5, Operand(FAST_ELEMENTS));
  __ beq(&fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}

// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     int stack_space,
                                     MemOperand* stack_space_operand,
                                     MemOperand return_value_operand,
                                     MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);
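  // The limit and level offsets are computed once from the actual isolate
  // addresses, so a single base register (r9 below) can address all three
  // handle-scope fields.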

  // Additional parameter is the address of the actual callback.
  DCHECK(function_address.is(r3) || function_address.is(r4));
  Register scratch = r5;

  __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
  __ LoadlB(scratch, MemOperand(scratch, 0));
  __ CmpP(scratch, Operand::Zero());

  Label profiler_disabled;
  Label end_profiler_check;
  __ beq(&profiler_disabled, Label::kNear);
  __ mov(scratch, Operand(thunk_ref));
  __ b(&end_profiler_check, Label::kNear);
  __ bind(&profiler_disabled);
  __ LoadRR(scratch, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  // r9 - next_address
  // r6 - next_address->kNextOffset
  // r7 - next_address->kLimitOffset
  // r8 - next_address->kLevelOffset
  __ mov(r9, Operand(next_address));
  __ LoadP(r6, MemOperand(r9, kNextOffset));
  __ LoadP(r7, MemOperand(r9, kLimitOffset));
  __ LoadlW(r8, MemOperand(r9, kLevelOffset));
  __ AddP(r8, Operand(1));
  __ StoreW(r8, MemOperand(r9, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r2);
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, scratch);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r2);
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ LoadP(r2, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ StoreP(r6, MemOperand(r9, kNextOffset));
  if (__ emit_debug_code()) {
    __ LoadlW(r3, MemOperand(r9, kLevelOffset));
    __ CmpP(r3, r8);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ SubP(r8, Operand(1));
  __ StoreW(r8, MemOperand(r9, kLevelOffset));
  __ CmpP(r7, MemOperand(r9, kLimitOffset));
  __ bne(&delete_allocated_handles, Label::kNear);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ LoadP(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand != NULL) {
    __ l(r6, *stack_space_operand);
  } else {
    __ mov(r6, Operand(stack_space));
  }
  __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL);

  // Check if the function scheduled an exception.
  __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ LoadP(r7, MemOperand(r7));
  __ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
  __ bne(&promote_scheduled_exception, Label::kNear);

  __ b(r14);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ StoreP(r7, MemOperand(r9, kLimitOffset));
  __ LoadRR(r6, r2);
  __ PrepareCallCFunction(1, r7);
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ LoadRR(r2, r6);
  __ b(&leave_exit_frame, Label::kNear);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2                  : callee
  //  -- r6                  : call_data
  //  -- r4                  : holder
  //  -- r3                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = r2;
  Register call_data = r6;
  Register holder = r4;
  Register api_function_address = r3;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // context save
  __ push(context);
  if (!is_lazy()) {
    // Load context from callee.
    __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined()) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ push(scratch);
  // return value default
  __ push(scratch);
  // isolate
  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  __ push(scratch);
  // holder
  __ push(holder);

  // Prepare arguments.
  __ LoadRR(scratch, sp);

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  // S390 LINUX ABI:
  //
  // Create 4 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1-3] FunctionCallbackInfo
  const int kApiStackSpace = 4;
  const int kFunctionCallbackInfoOffset =
      (kStackFrameExtraParamSlot + 1) * kPointerSize;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(r2) && !scratch.is(r2));
  // r2 = FunctionCallbackInfo&
  // Arguments are located after the return address.
  __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
  // FunctionCallbackInfo::implicit_args_
  __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ AddP(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ StoreP(ip, MemOperand(r2, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ LoadImmP(ip, Operand(argc()));
  __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  MemOperand length_operand =
      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
  MemOperand* stack_space_operand = &length_operand;
  stack_space = argc() + FCA::kArgsLength + 1;
  stack_space_operand = NULL;
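  // argc is known statically for this stub, so the unwind space is passed as
  // an immediate and the length-based operand computed above is left unused.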
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}

void CallApiGetterStub::Generate(MacroAssembler* masm) {
  int arg0Slot = 0;
  int accessorInfoSlot = 0;
  int apiStackSpace = 0;
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
  // name below the exit frame to make GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r6;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r4;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ push(scratch);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Push(scratch, scratch);
  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ Push(scratch, holder);
  __ Push(Smi::kZero);  // should_throw_on_error -> false
  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ push(scratch);
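  // The pushes above lay out the args_ array from highest index to lowest:
  // receiver (kThisIndex), data, return value plus its default, isolate,
  // holder, and should_throw_on_error, with the property name handle ending
  // up at sp[0].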

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
  __ LoadRR(r2, sp);                           // r2 = Handle<Name>
  __ AddP(r3, r2, Operand(1 * kPointerSize));  // r3 = v8::PCI::args_

  // If ABI passes Handles (pointer-sized struct) in a register:
  //
  // Create 2 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] AccessorInfo&
  //
  // Otherwise:
  //
  // Create 3 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] copy of Handle (first arg)
  //    [2] AccessorInfo&
  if (ABI_PASSES_HANDLES_IN_REGS) {
    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
    apiStackSpace = 2;
  } else {
    arg0Slot = kStackFrameExtraParamSlot + 1;
    accessorInfoSlot = arg0Slot + 1;
    apiStackSpace = 3;
  }

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, apiStackSpace);

  if (!ABI_PASSES_HANDLES_IN_REGS) {
    // Pass the 1st arg by reference.
    __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
    __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
  }

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
  __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
  // r3 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ LoadP(api_function_address,
           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390